DPDK patches and discussions
* [PATCH v1 0/5] add rte flow support for cpfl
@ 2023-08-12  7:55 Yuying Zhang
  2023-08-12  7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
                   ` (5 more replies)
  0 siblings, 6 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

This patchset adds rte_flow support to the cpfl driver.
It depends on the following two patch sets:
http://patchwork.dpdk.org/project/dpdk/cover/20230809155134.539287-1-beilei.xing@intel.com/
http://patchwork.dpdk.org/project/dpdk/cover/20230811100012.2078135-1-wenjing.qiao@intel.com/

Yuying Zhang (5):
  net/cpfl: setup rte flow skeleton
  common/idpf/base: refine idpf ctlq message structure
  net/cpfl: add cpfl control queue message handle
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine

 drivers/common/idpf/base/idpf_controlq_api.h |   3 +
 drivers/net/cpfl/cpfl_controlq.c             | 419 ++++++++++++-
 drivers/net/cpfl/cpfl_controlq.h             |  24 +
 drivers/net/cpfl/cpfl_ethdev.c               |  54 ++
 drivers/net/cpfl/cpfl_ethdev.h               |  94 +++
 drivers/net/cpfl/cpfl_flow.c                 | 331 ++++++++++
 drivers/net/cpfl/cpfl_flow.h                 |  88 +++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c      | 610 +++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.c             | 288 +++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h             |  87 +++
 drivers/net/cpfl/meson.build                 |   5 +-
 11 files changed, 2001 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

-- 
2.25.1



* [PATCH v1 1/5] net/cpfl: setup rte flow skeleton
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
@ 2023-08-12  7:55 ` Yuying Zhang
  2023-08-25  3:55   ` Xing, Beilei
  2023-08-12  7:55 ` [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Yuying Zhang
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

Set up the rte_flow backend skeleton. Introduce a framework that
supports different engines as rte_flow backends and bridges the
rte_flow driver API to the flow engines.
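
For context, a minimal sketch of how an engine is expected to plug into
this skeleton. The dummy_* callbacks and the RTE_INIT registration below
are hypothetical illustration only; struct cpfl_flow_engine,
cpfl_flow_engine_register() and the callback types come from this patch.

#include <errno.h>
#include <rte_common.h>
#include "cpfl_flow.h"

static int
dummy_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;	/* nothing to set up in this sketch */
}

static void
dummy_uninit(struct cpfl_adapter_ext *ad __rte_unused)
{
}

static int
dummy_parse(struct rte_eth_dev *dev __rte_unused,
	    const struct rte_flow_attr *attr __rte_unused,
	    const struct rte_flow_item pattern[] __rte_unused,
	    const struct rte_flow_action actions[] __rte_unused,
	    void **meta __rte_unused)
{
	return -ENOTSUP;	/* claim nothing, so other engines are tried */
}

static struct cpfl_flow_engine dummy_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = dummy_init,
	.uninit = dummy_uninit,
	.parse_pattern_action = dummy_parse,
};

/* Engines register themselves at load time; cpfl_flow_engine_match()
 * later walks the registered list and picks the first engine whose
 * parse_pattern_action() accepts the given pattern/actions.
 */
RTE_INIT(dummy_engine_register)
{
	cpfl_flow_engine_register(&dummy_engine);
}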

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  54 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 331 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  88 +++++++++
 drivers/net/cpfl/meson.build   |   3 +-
 5 files changed, 480 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 34b7c22ee1..23e5181588 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1199,6 +1200,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1231,6 +1245,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 		cpfl_p2p_queue_grps_del(vport);
 
 	if (!cpfl_vport->exceptional) {
+		cpfl_flow_free(cpfl_vport);
 		adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
 		adapter->cur_vport_nb--;
 		adapter->vports[vport->sw_idx] = NULL;
@@ -1248,6 +1263,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1449,6 +1487,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2411,6 +2450,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_create_ctrl_vport;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2418,6 +2464,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+	cpfl_ctrl_path_close(adapter);
+#endif
 err_create_ctrl_vport:
 	rte_free(adapter->vports);
 err_vports_alloc:
@@ -2574,6 +2624,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2713,6 +2764,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2e9480ffc1..c71f16ac60 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -143,9 +143,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -222,6 +225,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
 	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..e303936081
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > 6) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only priority 0-6 is supported.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine;
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	engine = cpfl_flow_engine_match(dev, attr, pattern, actions, NULL);
+
+	if (!engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	engine = cpfl_flow_engine_match(dev, attr, pattern, actions, &meta);
+	if (!engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine");
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..04f4cc1149
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t)(struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta);
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 84ba994469..222497f7c2 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -42,10 +42,11 @@ endif
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
+        'cpfl_flow.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
         'cpfl_controlq.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-endif
\ No newline at end of file
+endif
-- 
2.25.1



* [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
  2023-08-12  7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
@ 2023-08-12  7:55 ` Yuying Zhang
  2023-08-25  5:55   ` Xing, Beilei
  2023-08-12  7:55 ` [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Yuying Zhang
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

Add a cfg data field to the cookie union in idpf_ctlq_msg.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/common/idpf/base/idpf_controlq_api.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/common/idpf/base/idpf_controlq_api.h b/drivers/common/idpf/base/idpf_controlq_api.h
index 3780304256..b38b10465c 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -65,6 +65,9 @@ struct idpf_ctlq_msg {
 			u32 chnl_opcode;
 			u32 chnl_retval;
 		} mbx;
+		struct {
+			u64 data;
+		} cfg;
 	} cookie;
 	union {
 #define IDPF_DIRECT_CTX_SIZE	16
-- 
2.25.1



* [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
  2023-08-12  7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
  2023-08-12  7:55 ` [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Yuying Zhang
@ 2023-08-12  7:55 ` Yuying Zhang
  2023-08-25  6:23   ` Xing, Beilei
  2023-08-12  7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

Add cpfl driver control queue message handling, including
send/receive/clean/post_rx_buffs.
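
The fxp rule patch later in this series wraps these helpers into
send/receive loops with retries; as a minimal sketch, one request/response
round trip on a pair of config queues looks roughly like the following,
assuming hw, tx_cq, rx_cq and msg (with a DMA payload attached) are
already set up by the caller:

#include <stdint.h>
#include "cpfl_controlq.h"

/* One config-queue round trip; retry/polling loops and most error
 * handling are omitted here (see the fxp rule module for the real code).
 */
static int
cfgq_round_trip(struct idpf_hw *hw, struct idpf_ctlq_info *tx_cq,
		struct idpf_ctlq_info *rx_cq, struct idpf_ctlq_msg *msg)
{
	struct idpf_ctlq_msg *done[1];
	struct idpf_dma_mem *buf;
	uint16_t n = 1;
	int ret;

	ret = cpfl_vport_ctlq_send(hw, tx_cq, 1, msg);		/* post descriptor */
	if (ret)
		return ret;

	ret = cpfl_vport_ctlq_clean_sq(tx_cq, &n, done);	/* reclaim sent desc */
	if (ret)
		return ret;

	n = 1;
	ret = cpfl_vport_ctlq_recv(rx_cq, &n, msg);		/* poll for response */
	if (ret)
		return ret;

	/* hand the receive buffer back to hardware */
	buf = msg->data_len ? msg->ctx.indirect.payload : NULL;
	n = buf ? 1 : 0;
	return cpfl_vport_ctlq_post_rx_buffs(hw, rx_cq, &n, &buf);
}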

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 419 ++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 2 files changed, 442 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 97a6bdd042..c696a529a7 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2023 Intel Corporation
  */
 
 #include "cpfl_controlq.h"
@@ -332,6 +332,395 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(u64 *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+					   IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+	if (tbp == cq->next_to_clean)
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -378,3 +767,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..3fd658cc36 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  uint16_t *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
-- 
2.25.1



* [PATCH v1 4/5] net/cpfl: add fxp rule module
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
                   ` (2 preceding siblings ...)
  2023-08-12  7:55 ` [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Yuying Zhang
@ 2023-08-12  7:55 ` Yuying Zhang
  2023-08-25  7:35   ` Xing, Beilei
  2023-08-25  8:42   ` Xing, Beilei
  2023-08-12  7:55 ` [PATCH v1 5/5] net/cpfl: add fxp flow engine Yuying Zhang
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
  5 siblings, 2 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

Add a low-level fxp module for rule packing, creation and destruction.
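
For illustration, a minimal sketch of how a caller is expected to use
cpfl_rule_update(). The profile id, cookie and key bytes below are dummy
placeholders; the fxp flow engine in the next patch derives them from the
parsed rte_flow instead.

#include <string.h>
#include "cpfl_ethdev.h"
#include "cpfl_fxp_rule.h"

/* Push a single SEM rule through the tx/rx config queues of an interface. */
static int
add_one_sem_rule(struct cpfl_itf *itf, struct idpf_ctlq_info *tx_cq,
		 struct idpf_ctlq_info *rx_cq)
{
	struct cpfl_rule_info rinfo = {0};

	rinfo.type = CPFL_RULE_TYPE_SEM;
	rinfo.cookie = 0x1000;		/* unique rule cookie */
	rinfo.sem.prof_id = 1;		/* dummy profile id */
	rinfo.sem.key_byte_len = 4;
	memset(rinfo.sem.key, 0xff, rinfo.sem.key_byte_len);
	rinfo.act_byte_len = 0;		/* no actions in this sketch */

	/* packs the rule blob into itf->dma[]/itf->msg[], sends it on
	 * tx_cq and waits for the completion status on rx_cq
	 */
	return cpfl_rule_update(itf, tx_cq, rx_cq, &rinfo, 1, true);
}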

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h   |   4 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  87 ++++++++++
 drivers/net/cpfl/meson.build     |   1 +
 4 files changed, 380 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index c71f16ac60..63bcc5551f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -145,10 +145,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..936f57e4fa
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	uint16_t clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(uint16_t num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t i;
+	int ret = 0;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		switch (q_msg[i].status) {
+		case CPFL_CFG_PKT_ERR_OK:
+			continue;
+		case CPFL_CFG_PKT_ERR_EEXIST:
+			PMD_INIT_LOG(ERR, "The rule conflicts with an existing one");
+			return -EINVAL;
+		case CPFL_CFG_PKT_ERR_ENOTFND:
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		default:
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	uint16_t i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
+			break;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	      struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+			     struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		     struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (pack_default_rule(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (pack_mod_rule(rinfo, dma, msg) < 0)
+			ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_update(struct cpfl_itf *itf,
+		 struct idpf_ctlq_info *tx_cq,
+		 struct idpf_ctlq_info *rx_cq,
+		 struct cpfl_rule_info *rinfo,
+		 int rule_num,
+		 bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not create rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send rule");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..68efa8e3f8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+struct cpfl_lem_rule_info {
+	uint16_t prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_LEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+		struct cpfl_lem_rule_info lem;
+	};
+};
+
+struct cpfl_meter_action_info {
+	uint8_t meter_logic_bank_id;
+	uint32_t meter_logic_idx;
+	uint8_t prof_id;
+	uint8_t slot;
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_update(struct cpfl_itf *itf,
+		     struct idpf_ctlq_info *tx_cq,
+		     struct idpf_ctlq_info *rx_cq,
+		     struct cpfl_rule_info *rinfo,
+		     int rule_num,
+		     bool add);
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		   struct idpf_ctlq_msg q_msg[]);
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+		      struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 222497f7c2..4061123034 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if js_dep.found()
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
         'cpfl_controlq.c',
+        'cpfl_fxp_rule.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.25.1



* [PATCH v1 5/5] net/cpfl: add fxp flow engine
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
                   ` (3 preceding siblings ...)
  2023-08-12  7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
@ 2023-08-12  7:55 ` Yuying Zhang
  2023-08-25  9:15   ` Xing, Beilei
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
  5 siblings, 1 reply; 128+ messages in thread
From: Yuying Zhang @ 2023-08-12  7:55 UTC (permalink / raw)
  To: dev, beilei.xing, qi.z.zhang, jingjing.wu; +Cc: Yuying Zhang

Adapt the fxp low-level module as a flow engine.
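
From the application side, rules then reach this engine through the
standard rte_flow API. A hedged usage example follows; the concrete
ETH/QUEUE pattern is illustrative only, since the patterns actually
accepted depend on the JSON configuration loaded through the flow_parser
devarg.

#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
create_example_flow(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_eth eth_spec = {0}, eth_mask = {0};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* dispatched to cpfl_flow_create(), which asks each registered
	 * engine (here: the fxp engine) to parse and program the rule
	 */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}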

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  85 ++++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 610 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 696 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 63bcc5551f..d7e9ea1a74 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -92,6 +92,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX       0
+#define CPFL_FPCP_CFGQ_RX       1
 #define CPFL_CFGQ_NUM		8
 
 struct cpfl_vport_param {
@@ -230,6 +232,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_whitelist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
@@ -265,5 +269,86 @@ int cpfl_packets_dispatch(void *arg);
 	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
+#define CPFL_INVALID_HW_ID      UINT16_MAX
+
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	struct cpfl_vport_id vport_identity;
+	uint32_t vport_id;
+	int ret;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+	adapter = itf->adapter;
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport_info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
+		vport_identity.pf_id = ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+
+		ret = rte_hash_lookup_data(adapter->vport_map_hash, &vport_identity,
+					  (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id does not exist");
+			goto err;
+		}
+
+		/* rte_spinlock_unlock(&adapter->vport_map_lock); */
+		return info->vport_info.vsi_id;
+	}
+
+err:
+	/* rte_spinlock_unlock(&adapter->vport_map_lock); */
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e10639c842
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,610 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+#include "rte_memcpy.h"
+
+#define COOKIE_DEF	0x1000
+#define PREC_MAX	7
+#define PREC_DEF	1
+#define PREC_SET	5
+#define TYPE_ID		3
+#define OFFSET		0x0a
+#define HOST_ID_DEF	0
+#define PF_NUM_DEF	0
+#define PORT_NUM_DEF	0
+#define RESP_REQ_DEF	2
+#define PIN_TO_CACHE_DEF	0
+#define CLEAR_MIRROR_1ST_STATE_DEF  0
+#define FIXED_FETCH_DEF 0
+#define PTI_DEF		0
+#define MOD_OBJ_SIZE_DEF	0
+#define PIN_MOD_CONTENT_DEF	0
+
+#define MAX_MOD_CONTENT_INDEX	256
+#define MAX_MR_ACTION_NUM 8
+
+struct rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;
+	uint32_t pr_num;
+	uint32_t mr_num;
+	uint32_t rule_num;
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t rule_cookie = COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		cpq_id = CPFL_FPCP_CFGQ_TX;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+			       rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		cpfl_fxp_rule_free(flow);
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		cpq_id = CPFL_FPCP_CFGQ_TX;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+			       rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(PREC_DEF,
+					mr_action->mod.prof,
+					PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = fxp_mod_idx_alloc(adapter);
+	if (mod_idx == MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = 0x1237561;
+	mod_rinfo->port_num = PORT_NUM_DEF;
+	mod_rinfo->resp_req = RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct rule_info_meta *rim,
+		      int priority,
+		      int index,
+		      bool is_vport_rule)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when the action is REPRESENTED_PORT or PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi_id or physical port id */
+	bool is_vsi;
+	bool set_meta_valid = false;
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	bool fwd_jump = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = PREC_MAX - priority;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi && !fwd_jump)
+				fwd_vsi = true;
+			else
+				goto err;
+			if (is_vport_rule) {
+				dst_itf = itf;
+			} else {
+				act_ethdev = action->conf;
+				dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+			}
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
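+				/* Non-vport destination (e.g. a representor):
+				 * mark with -2 so the QUEUE/RSS cases below reject it.
+				 */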
+				queue_id = -2;
+			}
+
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi || is_vport_rule)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			if (is_vsi || is_vport_rule)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q && !fwd_jump)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == -2)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+#define FXP_MAX_QREGION_SIZE 128
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q && !fwd_jump)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == -2)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action != NULL && !set_meta_valid) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (rule_cookie == ~0llu)
+		rule_cookie = COOKIE_DEF;
+	rinfo->cookie = rule_cookie++;
+	rinfo->host_id = HOST_ID_DEF;
+	rinfo->port_num = PORT_NUM_DEF;
+	rinfo->resp_req = RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool is_mod_action(const struct rte_flow_action actions[], bool *set_meta_valid)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+			return true;
+		case RTE_FLOW_ACTION_TYPE_SET_TAG:
+			*set_meta_valid = true;
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0, mr_num = 0;
+	struct cpfl_vport *vport;
+	struct rule_info_meta *rim;
+	bool set_meta_valid = false;
+	int ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		if (vport->exceptional) {
+			PMD_DRV_LOG(ERR, "Can't create rte_flow with exceptional vport.");
+			return -EINVAL;
+		}
+	}
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No matching pattern is supported.");
+		return -EINVAL;
+	}
+
+	if (is_mod_action(actions, &set_meta_valid)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Action parsing failed.");
+			return -EINVAL;
+		}
+		if (!set_meta_valid)
+			mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid input set");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority,
+				  0, false)) {
+		PMD_DRV_LOG(ERR, "Invalid input set");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX);
+
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return MAX_MOD_CONTENT_INDEX;
+
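+	/* the lowest set bit in the scanned slab is the first free slot */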
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 4061123034..ce46d7e76e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,6 +43,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow.c',
+        'cpfl_flow_engine_fxp.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
         'cpfl_controlq.c',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 0/9] add rte flow support for cpfl
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
@ 2023-08-15 16:50     ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
                         ` (8 more replies)
  2023-09-06  9:33     ` [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs Wenjing Qiao
                       ` (9 subsequent siblings)
  10 siblings, 9 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: add mod rule parser support for rte flow

Yuying Zhang (7):
  net/cpfl: set up rte flow skeleton
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
  app/test-pmd: refine encap content
  net/cpfl: fix incorrect status calculation

 app/test-pmd/cmdline_flow.c             |   12 +-
 doc/guides/nics/cpfl.rst                |   43 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  803 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  394 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 +++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1834 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   18 +
 20 files changed, 6489 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-09-15 15:11         ` Stephen Hemminger
  2023-08-15 16:50       ` [PATCH v4 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
                         ` (7 subsequent siblings)
  8 siblings, 1 reply; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add the devargs "flow_parser" for the rte_flow json parser, which
depends on the json-c library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add a json parser for rte_flow pattern rules. The cpfl
PMD supports utilizing a JSON config file to translate
rte_flow tokens into low-level hardware resources.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
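As an illustration only (not part of this patch; the helper name and the
address/queue values are made up), this is the kind of rte_flow rule an
application could create once a JSON profile is loaded. The patterns and
actions actually accepted depend entirely on the profile; group 1 is what
the SEM pattern parser in this series expects:

#include <stdint.h>
#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

static struct rte_flow *
create_example_flow(uint16_t port_id, uint16_t rx_queue,
		    struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {0}, ip_mask = {0};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* match one destination IPv4 address, then steer to rx_queue */
	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1));
	ip_mask.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX);

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
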
 doc/guides/nics/cpfl.rst            |   30 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1302 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |   13 +
 6 files changed, 1625 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..aae157f0df 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,24 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+  Using the ``devargs`` option ``flow_parser`` the user can specify the path
+  of a json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the json file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The json-c library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +176,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The PMD uses a json file to direct the parsing of rte_flow tokens into
+low-level hardware resources.
+
+- Required Libraries
+
+  * json-c (version 0.14+)
+
+    * For Ubuntu, it can be installed using ``apt install libjson-c-dev``
+
+- Run testpmd with the json file
+
+  .. code-block:: console
+
+     dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..54ae127cc3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as data source to fill the key of a flow rule.
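+ * The chunks are accessed via the cpfl_metadata_write16()/cpfl_metadata_read16()
+ * helpers used by the JSON flow parser (see cpfl_flow_parser.c).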
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	adapter = itf->adapter;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..630ce8a227
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1302 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_object_get_string(subobject);
+}
+
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int64(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_object *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(ob_pr_key_attrs, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'Value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_object_array_length(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name, *mask;
+
+		object = json_object_array_get_idx(ob_fields, i);
+		name = cpfl_json_object_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_object_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_object *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_object_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_object *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_object *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_object_array_get_idx(ob_fvs, i);
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_object *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_object_get(ob_per_act, "data");
+		ret = cpfl_json_object_to_uint16(ob_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(ob_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(ob_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_object *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
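+/*
+ * A rough sketch of the expected JSON layout, inferred from the parsing
+ * code below; the values are illustrative and this is not a formal schema:
+ *
+ * {
+ *   "patterns": [
+ *     {
+ *       "key": {
+ *         "protocols": [
+ *           { "type": "ipv4",
+ *             "fields": [ { "name": "src_addr", "mask": "255.255.255.255" } ] }
+ *         ],
+ *         "attributes": [ { "Name": "ingress", "Value": 1 } ]
+ *       },
+ *       "actions": [
+ *         { "type": "sem",
+ *           "data": { "profile": 1, "subprofile": 0, "keysize": 16,
+ *                     "fieldvectors": [ { "offset": 0, "type": "immediate",
+ *                                         "value": 1 } ] } }
+ *       ]
+ *     }
+ *   ]
+ * }
+ */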
+static int
+cpfl_flow_js_pattern_rule(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		json_object *ob_pr_actions;
+		json_object *ob_pr_key;
+		json_object *ob_pr_key_protos;
+		json_object *ob_pr_key_attrs;
+		int ret;
+
+		object = json_object_array_get_idx(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* inet_pton() returns 1 on success, 0 if the mask string is not a
+	 * valid IPv4 address, and -1 on error.
+	 */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret != 1)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst; see Field Mapping for more */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->src))
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->dst))
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..af64a158a8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <json-c/json.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* Define how to map the current key to the low-level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD in how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array defining how to map the current key to the low-level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..1e0a1b0290 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,16 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    if js_dep.version().version_compare('<0.14')
+        message('json-c lib version is too low')
+    else
+        sources += files(
+                'cpfl_flow_parser.c',
+        )
+        dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+        ext_deps += js_dep
+    endif
+endif
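
For review context, a minimal sketch of how the metadata helpers above are
meant to be used (not intended to be applied; the function name
example_prepare_sem_metadata is hypothetical and it assumes an already
configured vport interface 'itf'):

#include "cpfl_flow_parser.h"

/* Sketch only: stamp port and VSI IDs into the metadata chunks before
 * building a SEM key, then read back the 16-bit source VSI written at
 * metadata type 6, offset 0.
 */
static int
example_prepare_sem_metadata(struct cpfl_itf *itf)
{
	cpfl_metadata_init(&itf->adapter->meta);

	if (!cpfl_metadata_write_port_id(itf) ||
	    !cpfl_metadata_write_sourcevsi(itf) ||
	    !cpfl_metadata_write_vsi(itf))
		return -1;

	return cpfl_metadata_read16(&itf->adapter->meta, 6, 0);
}
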
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 2/9] net/cpfl: add mod rule parser support for rte flow
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
                         ` (6 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add json parser support for rte flow modification rules.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 534 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 633 insertions(+), 1 deletion(-)
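
For review context, a minimal usage sketch of the new cpfl_flow_parse_actions()
entry point (not intended to be applied; the function name example_parse_decap
is hypothetical, and it assumes the parser was created from a JSON file whose
"modifications" array contains a rule keyed on vxlan_decap):

#include "cpfl_flow_parser.h"

/* Sketch only: translate an rte_flow action list into the matched
 * modification profile via the parser added in this patch.
 */
static int
example_parse_decap(struct cpfl_flow_js_parser *parser)
{
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct cpfl_flow_mr_action mr_action = {0};
	int ret;

	ret = cpfl_flow_parse_actions(parser, actions, &mr_action);
	if (ret < 0)
		return ret;

	/* mr_action.mod.prof and mr_action.mod.data now hold the profile
	 * ID and the composed MOD memory region for the matched rule.
	 */
	return mr_action.mod.prof;
}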

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index 630ce8a227..c33ee1ec27 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_object_to_string(json_object *object, const char *name)
 {
@@ -50,6 +62,25 @@ cpfl_json_object_to_string(json_object *object, const char *name)
 	return json_object_get_string(subobject);
 }
 
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
 {
@@ -517,6 +548,228 @@ cpfl_flow_js_pattern_rule(json_object *ob_root, struct cpfl_flow_js_parser *pars
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_object *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(ob_protos, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_object *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(ob_layouts, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_object *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_object *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications object is optional.");
+		return 0;
+	}
+	len = json_object_array_length(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_object_array_get_idx(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -527,6 +780,11 @@ cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -601,6 +859,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -617,6 +884,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -645,7 +923,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1234,6 +1512,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		PMD_DRV_LOG(INFO, "The modifications object is optional.");
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index af64a158a8..2618a9a81f 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the protocols
+ * field of data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where the data is copied from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that helps
+ * the driver compose the MOD memory region when the action needs to insert/update some
+ * packet data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that helps the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 3/9] net/cpfl: set up rte flow skeleton
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
                         ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as the rte_flow backend. Bridge the
rte_flow driver API to the flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
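
For review context, a sketch of how a flow engine is expected to plug into
this skeleton (the example_* names are hypothetical; only
cpfl_flow_engine_register(), struct cpfl_flow_engine and the DPDK RTE_INIT
constructor come from this series / DPDK):

#include <errno.h>
#include <rte_common.h>
#include "cpfl_flow.h"

/* Sketch only: an engine that initializes successfully but declines
 * every flow, so cpfl_flow_engine_match() moves on to the next engine.
 */
static int
example_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static int
example_parse_pattern_action(struct rte_eth_dev *dev __rte_unused,
			     const struct rte_flow_attr *attr __rte_unused,
			     const struct rte_flow_item *pattern __rte_unused,
			     const struct rte_flow_action *actions __rte_unused,
			     void **meta __rte_unused)
{
	return -ENOTSUP;
}

static struct cpfl_flow_engine example_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = example_engine_init,
	.parse_pattern_action = example_parse_pattern_action,
};

/* Registered at load time, before cpfl_flow_engine_init() walks the list. */
RTE_INIT(example_engine_register)
{
	cpfl_flow_engine_register(&example_engine);
}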

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 54ae127cc3..44418ce325 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vports support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1e0a1b0290..9f1818f8dc 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if js_dep.found()
         message('json-c lib version is too low')
     else
         sources += files(
+                'cpfl_flow.c',
                 'cpfl_flow_parser.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 4/9] net/cpfl: add FXP low level implementation
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (2 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 5/9] net/cpfl: add fxp rule module Zhang, Yuying
                         ` (4 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add the FXP low-level implementation for CPFL rte_flow to
create/delete rules, as well as set up the control vport
and control queue.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 ++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   2 +
 9 files changed, 2150 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h
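
For review context, a sketch of how the action encoding macros in
cpfl_actions.h compose 32-bit action words (the function name and the
precedence/VSI/queue values are arbitrary, and the interpretation of the
SET_VSI/SET_Q fields is an assumption based on the macro names):

#include "cpfl_actions.h"

/* Sketch only: encode a SET_VSI (forward to a LAN VSI) action and a
 * SET_Q action, both at precedence 1.
 */
static void
example_encode_actions(union cpfl_action_set acts[2], uint16_t vsi_id,
		       uint32_t queue_id)
{
	acts[0].data = CPFL_ACT_MAKE_16B(1, CPFL_ACT_16B_INDEX_SET_VSI,
					 CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI,
								   CPFL_PE_LAN,
								   vsi_id));

	acts[1].data = CPFL_ACT_MAKE_24B_A(1, CPFL_ACT_24B_INDEX_SET_Q,
					   CPFL_ACT_24B_SET_Q_VALID | queue_id);
}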

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base actions
+ * once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update existing
+ * metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry and thus require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two chained
+ * action sets.  The chained action set comes first; the base/parent action
+ * set comes second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
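
Not part of the patch: a minimal usage sketch of the action factories declared
above, assuming the cpfl_actions.h introduced by this series. The helper name,
precedence value, VSI id, metadata id/offset and the 32-bit value are
illustrative placeholders only.

#include <stdint.h>
#include "cpfl_actions.h"

/* Build three action words: forward to VSI 3 on the LAN protocol engine,
 * then write a 32-bit metadata dword via the chained SET_MD/32 encoding.
 */
static void
example_build_action_words(uint32_t act_words[3])
{
	union cpfl_action_set fwd;
	struct cpfl_action_set_ext md32;

	/* SET_VSI slot 0, precedence 1 (placeholder) */
	fwd = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, 3);

	/* SET_MD slot 0, precedence 1, metadata id 2, byte offset 0 (placeholders) */
	cpfl_act_set_md32_ext(&md32, 0, 1, 2, 0, 0x12345678);

	act_words[0] = fwd.data;
	act_words[1] = md32.acts[0].data; /* chained AUX action set first */
	act_words[2] = md32.acts[1].data; /* base/parent action set second */
}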
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly or is inconsistent
+ * with the control queue parameters, this routine frees the memory for
+ * both the descriptors and the buffers.
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	/* free the buffer pointer array itself */
+	idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues, only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The allocated and initialized control queue is returned through 'cq_out'
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
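
Not part of the patch: a minimal sketch of how a caller is expected to drive
cpfl_vport_ctlq_add(), mirroring the cpfl_cfgq_setup()/cpfl_add_cfgqs() flow
in cpfl_ethdev.c later in this patch. The queue id and the zeroed register
block are placeholders; real callers fill them from the control vport's chunk
info, and the helper name is illustrative.

static int
example_add_rx_cfgq(struct idpf_hw *hw, struct idpf_ctlq_info **cq_out)
{
	struct cpfl_ctlq_create_info qinfo = {
		.type = IDPF_CTLQ_TYPE_CONFIG_RX,
		.id = 0,			/* placeholder queue id */
		.len = CPFL_CFGQ_RING_LEN,
		.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE,
	};
	int ret;

	/* DMA memory must be provided up front; cpfl_ctlq_alloc_ring_res()
	 * only records it, it does not allocate it.
	 */
	if (!idpf_alloc_dma_mem(hw, &qinfo.ring_mem,
				qinfo.len * sizeof(struct idpf_ctlq_desc)))
		return -ENOMEM;
	if (!idpf_alloc_dma_mem(hw, &qinfo.buf_mem,
				(uint32_t)qinfo.len * qinfo.buf_size)) {
		idpf_free_dma_mem(hw, &qinfo.ring_mem);
		return -ENOMEM;
	}

	ret = cpfl_vport_ctlq_add(hw, &qinfo, cq_out);
	if (ret) {
		idpf_free_dma_mem(hw, &qinfo.ring_mem);
		idpf_free_dma_mem(hw, &qinfo.buf_mem);
	}
	return ret;
}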
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFLib will piece
+	 * into individual buffers for each descriptor.
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 44418ce325..88e2ecf754 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef CPFL_FLOW_JSON_SUPPORT
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef CPFL_FLOW_JSON_SUPPORT
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef CPFL_FLOW_JSON_SUPPORT
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common bit context for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the bit context for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
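
Not part of the patch: a minimal sketch of how the helpers above (declared in
cpfl_rules.h below) combine to build one SEM "add rule" control queue message,
assuming the caller already owns a DMA buffer for the payload plus key/action
byte arrays. The helper name and the profile id, VSI id, cookie and flag
values are illustrative placeholders only.

static void
example_prep_sem_add_rule(struct idpf_dma_mem *payload,
			  const uint8_t *key, uint8_t key_len,
			  const uint8_t *acts, uint8_t act_len,
			  struct idpf_ctlq_msg *msg)
{
	union cpfl_rule_cfg_pkt_record *blob = payload->va;
	struct cpfl_rule_cfg_data cfg = {0};
	uint16_t cfg_ctrl;

	/* 1. build the rule blob (key + actions + CFG_CTRL) in the DMA buffer */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(1 /* prof_id */, 0, 0, 0);
	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, blob);

	/* 2. fill the common descriptor fields (placeholder cookie/vsi/etc.) */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule,
				       0x1234 /* cookie */, 0 /* vsi_id */,
				       0 /* port_num */, 0 /* host_id */,
				       0 /* time_sel */, 0 /* time_sel_val */,
				       1 /* cache_wr_thru */, 1 /* resp_req */,
				       sizeof(*blob), payload, &cfg.common);

	/* 3. translate the config data into a control queue descriptor */
	cpfl_prep_rule_desc(&cfg, msg);
}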
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max hash collisions reached */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * Macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
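
Not part of the patch: a small illustration of the MAKE_MASK*/SHIFT_VAL*
helpers defined above, packing and then recovering the HOST_ID field
(bits 20:18) of the rule descriptor context. The helper name and the value 5
are arbitrary placeholders.

#include "cpfl_rules.h"

static uint8_t
example_pack_unpack_host_id(void)
{
	uint64_t ctx = 0;

	/* pack: shift the value into the field and mask to its width */
	ctx |= SHIFT_VAL64(5, MEV_RULE_HOST_ID);

	/* unpack: mask the field and shift back down, yielding 5 */
	return (uint8_t)SHIFT_VAL_RT(64, ctx, MEV_RULE_HOST_ID);
}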
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
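+	/* Config queues are created in tx/rx pairs: the even index is the tx
+	 * queue and the odd index is the rx queue, so 2 * i + 1 selects the
+	 * i-th rx config queue.
+	 */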
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 9f1818f8dc..53eb5aecad 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,8 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
+        'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 5/9] net/cpfl: add fxp rule module
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (3 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
                         ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add a low-level FXP module for rule packing, creation and destruction.

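For reference, a minimal sketch of how a caller is expected to drive this
module (the helper below is hypothetical and not part of this patch; it
assumes the first config control queue pair, ctlqp[0] for tx and ctlqp[1]
for rx, that the itf's per-rule DMA buffers were already set up by
cpfl_alloc_dma_mem_batch(), and that cookie/port/response fields are filled
the way the flow engine does in a later patch):

  /* hypothetical caller, for illustration only;
   * assumes "cpfl_ethdev.h" and "cpfl_fxp_rule.h" are included
   */
  static int
  example_add_sem_rule(struct cpfl_itf *itf, uint16_t prof_id,
                       const uint8_t *key, uint8_t key_len)
  {
          struct cpfl_adapter_ext *ad = itf->adapter;
          struct cpfl_rule_info rinfo = { 0 };

          if (key_len > CPFL_MAX_KEY_LEN)
                  return -EINVAL;

          rinfo.type = CPFL_RULE_TYPE_SEM;
          rinfo.sem.prof_id = prof_id;
          rinfo.sem.key_byte_len = key_len;
          memcpy(rinfo.sem.key, key, key_len);

          /* pack the rule, send it on the tx config queue and poll the
           * rx config queue for the hardware response
           */
          return cpfl_rule_process(itf, ad->ctlqp[0], ad->ctlqp[1],
                                   &rinfo, 1, true);
  }
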
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++
 drivers/net/cpfl/meson.build     |   1 +
 7 files changed, 850 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 476c78f235..ed76282b0c 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..740ae6522c 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88e2ecf754..cb407e66af 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
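+	/* Carve the single allocation into batch_size chunks of 'size' bytes:
+	 * dma[i] points at chunk i + 1, while the first chunk stays with orig_dma.
+	 */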
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
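+	/* Poll the send queue for completed descriptors, sleeping 10 us between
+	 * retries, until all num_q_msg messages are cleaned or we time out.
+	 */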
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 53eb5aecad..a06265e6d5 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -49,6 +49,7 @@ if js_dep.found()
         sources += files(
 		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
+		'cpfl_fxp_rule.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
         ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 6/9] net/cpfl: add fxp flow engine
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (4 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 5/9] net/cpfl: add fxp rule module Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 7/9] net/cpfl: add flow support for representor Zhang, Yuying
                         ` (2 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt the FXP low-level module as a flow engine.

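The engine plugs into the cpfl flow framework introduced earlier in this
series; from an application the path it serves is plain rte_flow. A rough
sketch (port id, group and queue index are placeholders; whether a given
pattern is accepted depends on the SEM profiles described by the loaded
JSON parser file):

  #include <rte_ethdev.h>
  #include <rte_flow.h>

  /* steer any ETH/IPv4/TCP packet received on port_id to rx queue 1 */
  static struct rte_flow *
  example_tcp_to_queue(uint16_t port_id)
  {
          struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
          struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                  { .type = RTE_FLOW_ITEM_TYPE_TCP },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          struct rte_flow_action_queue queue = { .index = 1 };
          struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };
          struct rte_flow_error error;

          return rte_flow_create(port_id, &attr, pattern, actions, &error);
  }
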
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 611 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e0c08a77c3
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle
+		 * messages: the even index is the tx queue and the odd index is
+		 * the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter failed to create flow");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "failed to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No matching pattern is supported.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "failed to parse actions.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
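+	/* Scan for a slab (64-bit word) containing a set bit, i.e. a free slot;
+	 * __builtin_ffsll() locates the bit inside the slab, and clearing it
+	 * marks the mod content index as in use.
+	 */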
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index a06265e6d5..7c6a000933 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -50,6 +50,7 @@ if js_dep.found()
 		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
 		'cpfl_fxp_rule.c',
+		'cpfl_flow_engine_fxp.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
         ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 7/9] net/cpfl: add flow support for representor
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (5 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 8/9] app/test-pmd: refine encap content Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for the representor, so that the representor can
create, destroy, validate and flush rules.

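In C terms, the testpmd command added to cpfl.rst below corresponds roughly
to the following rte_flow call (hypothetical snippet; port ids are
placeholders, and matching still depends on the JSON parser profiles):

  #include <rte_ethdev.h>
  #include <rte_flow.h>

  /* forward matched ETH/IPv4/TCP traffic to the ethdev identified by
   * dst_port (a local vport or a representor)
   */
  static struct rte_flow *
  example_fwd_to_representor(uint16_t port_id, uint16_t dst_port)
  {
          struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
          struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                  { .type = RTE_FLOW_ITEM_TYPE_TCP },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          struct rte_flow_action_ethdev port = { .port_id = dst_port };
          struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, .conf = &port },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };
          struct rte_flow_error error;

          return rte_flow_create(port_id, &attr, pattern, actions, &error);
  }
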
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 13 ++++
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 90 ++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index aae157f0df..bcfa2a8a5b 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -194,3 +194,16 @@ low level hardware resources.
    .. code-block:: console
 
    dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from I/O port to a local (CPF's) vport::
+
+   .. code-block:: console
+
+   flow create 0 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id 0 / end
+
+#. Send the packet, and it should be received and displayed by the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="enp24s0f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 3d9be208d0..bad71ad3fd 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -81,6 +81,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index e0c08a77c3..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
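+		/* Map the representor onto one of the CPFL_TX_CFGQ_NUM control
+		 * queue pairs based on its PF/VF id.
+		 */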
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
@@ -414,6 +434,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,7 +508,13 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
-	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
 		return -EINVAL;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..2ab04f1e60 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 8/9] app/test-pmd: refine encap content
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (6 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 7/9] net/cpfl: add flow support for representor Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  2023-08-15 16:50       ` [PATCH v4 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, stable

From: Yuying Zhang <yuying.zhang@intel.com>

Refine the VXLAN encap content of all protocol headers: fill in the
Ethernet type, the IPv4/IPv6 version, next protocol and TTL/hop limit,
the UDP checksum and the VXLAN flags, so that the generated
encapsulation headers are complete.

Fixes: 1960be7d32f8 ("app/testpmd: add VXLAN encap/decap")
Cc: stable@dpdk.org

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 app/test-pmd/cmdline_flow.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 94827bcc4a..b6cc0d9620 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -8514,7 +8514,7 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 				.type = RTE_FLOW_ITEM_TYPE_END,
 			},
 		},
-		.item_eth.hdr.ether_type = 0,
+		.item_eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
 		.item_vlan = {
 			.hdr.vlan_tci = vxlan_encap_conf.vlan_tci,
 			.hdr.eth_proto = 0,
@@ -8522,24 +8522,32 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 		.item_ipv4.hdr = {
 			.src_addr = vxlan_encap_conf.ipv4_src,
 			.dst_addr = vxlan_encap_conf.ipv4_dst,
+			.version_ihl = RTE_IPV4_VHL_DEF,
+			.next_proto_id = IPPROTO_UDP,
+			.time_to_live = IPDEFTTL,
+			.hdr_checksum = rte_cpu_to_be_16(1),
 		},
 		.item_udp.hdr = {
 			.src_port = vxlan_encap_conf.udp_src,
 			.dst_port = vxlan_encap_conf.udp_dst,
+			.dgram_cksum = RTE_BE16(0x01),
 		},
-		.item_vxlan.hdr.flags = 0,
+		.item_vxlan.hdr.flags = 0x08,
 	};
 	memcpy(action_vxlan_encap_data->item_eth.hdr.dst_addr.addr_bytes,
 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(action_vxlan_encap_data->item_eth.hdr.src_addr.addr_bytes,
 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
 	if (!vxlan_encap_conf.select_ipv4) {
+		action_vxlan_encap_data->item_eth.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
 		       &vxlan_encap_conf.ipv6_src,
 		       sizeof(vxlan_encap_conf.ipv6_src));
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
 		       &vxlan_encap_conf.ipv6_dst,
 		       sizeof(vxlan_encap_conf.ipv6_dst));
+		action_vxlan_encap_data->item_ipv6.hdr.proto = IPPROTO_UDP;
+		action_vxlan_encap_data->item_ipv6.hdr.hop_limits = IPDEFTTL;
 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
 			.spec = &action_vxlan_encap_data->item_ipv6,
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v4 9/9] net/cpfl: fix incorrect status calculation
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                         ` (7 preceding siblings ...)
  2023-08-15 16:50       ` [PATCH v4 8/9] app/test-pmd: refine encap content Zhang, Yuying
@ 2023-08-15 16:50       ` Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-15 16:50 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Fix the incorrect ingress packet count calculation: discarded packets
are already reported in imissed, so they must not be subtracted from
ipackets.
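
As a worked illustration (hypothetical counter values, not from a real
capture): if the hardware reports rx_unicast=100, rx_multicast=10,
rx_broadcast=5 and rx_discards=3, the driver should expose ipackets=115
and imissed=3. The old formula reported ipackets=112, counting the three
discarded packets against both ipackets and imissed.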

Fixes: e3289d8fb63f ("net/cpfl: support basic statistics")

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index cb407e66af..5b5abc7684 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -322,7 +322,7 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		idpf_vport_stats_update(&vport->eth_stats_offset, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
-				pstats->rx_broadcast - pstats->rx_discards;
+				  pstats->rx_broadcast;
 		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
 						pstats->tx_unicast;
 		stats->imissed = pstats->rx_discards;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 0/8] add rte flow support for cpfl
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
@ 2023-08-22  1:02       ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
                           ` (8 more replies)
  2023-09-15 10:00       ` [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
                         ` (8 subsequent siblings)
  9 siblings, 9 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: add mod rule parser support for rte flow

Yuying Zhang (6):
  net/cpfl: set up rte flow skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor

 doc/guides/nics/cpfl.rst                |   43 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  803 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 +++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1827 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6465 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 2/8] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
                           ` (7 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" for rte flow json parser which
depends on jansson library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add a JSON parser for rte_flow pattern rules. The cpfl
PMD supports using a JSON config file to translate
rte_flow tokens into low-level hardware resources.
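
For reference, a minimal sketch (not code from this patch) of how the
parser API introduced here is intended to be used by a flow engine;
"itf", "items", "attr" and "json_path" (the flow_parser devarg value)
are assumed to be supplied by the caller:

  #include <string.h>
  #include "cpfl_flow_parser.h"

  static int
  example_parse(struct cpfl_itf *itf, const char *json_path,
                const struct rte_flow_item items[],
                const struct rte_flow_attr *attr)
  {
          struct cpfl_flow_js_parser *parser = NULL;
          struct cpfl_flow_pr_action pr_action;
          int ret;

          memset(&pr_action, 0, sizeof(pr_action));

          /* Load the JSON file and build the in-memory pattern rules. */
          ret = cpfl_parser_create(&parser, json_path);
          if (ret < 0)
                  return ret;

          /* Match the rte_flow pattern/attributes against the "patterns"
           * rules; on success pr_action holds the SEM profile and field
           * vector used to program the hardware.
           */
          ret = cpfl_flow_parse_items(itf, parser, items, attr, &pr_action);

          cpfl_parser_destroy(parser);
          return ret;
  }

In the driver itself the parser would typically be created once when the
devargs are parsed and kept with the adapter, rather than per flow as in
this sketch.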

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst            |   30 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1295 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1612 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..15680a10a6 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,24 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports using a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+  Using the ``devargs`` option ``flow_parser``, the user can specify the path
+  of a JSON file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the JSON file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +176,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The PMD uses a JSON file that directs the cpfl PMD to parse rte_flow tokens into
+low-level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using `apt install libjansson-dev`
+
+- Run testpmd with the JSON file
+
+  .. code-block:: console
+
+     dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+		adapter = itf->adapter;
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..be5e983a96
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->src))
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->dst))
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..367a6da574
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/*  a 16 bits value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile/key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 2/8] net/cpfl: add mod rule parser support for rte flow
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 3/8] net/cpfl: set up rte flow skeleton Zhang, Yuying
                           ` (6 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add json parser support for rte flow modification rules.
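
For illustration only (not part of this patch), below is a minimal sketch
of the kind of rte_flow action list these modification rules are meant to
match: a VXLAN_ENCAP action whose definition carries the eth/ipv4/udp/vxlan
headers that the JSON "layout" hints refer to. All spec values and names in
the sketch are placeholders.

#include <rte_flow.h>

/* Empty specs are placeholders; a real application fills in the outer
 * header fields to be inserted by the encap action.
 */
static struct rte_flow_item_eth   enc_eth;
static struct rte_flow_item_ipv4  enc_ipv4;
static struct rte_flow_item_udp   enc_udp;
static struct rte_flow_item_vxlan enc_vxlan;

static struct rte_flow_item enc_def[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &enc_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &enc_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &enc_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action_vxlan_encap enc_conf = {
	.definition = enc_def,
};

static const struct rte_flow_action enc_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &enc_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

cpfl_flow_parse_actions() walks such an action list against the parsed
"modifications" rules and, on a match, fills a cpfl_flow_mr_action with
the mod profile ID and the byte layout composed from the encap headers.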

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 534 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 633 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index be5e983a96..f8cd6dd09a 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -50,6 +62,25 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -514,6 +545,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint) + 1);
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -524,6 +777,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -594,6 +852,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -610,6 +877,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -638,7 +916,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1227,6 +1505,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 367a6da574..b7bf21bd76 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the protocols
+ * field of data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where to copy the data from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that help
+ * the driver compose the MOD memory region when the action needs to insert/update some packet
+ * data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that help the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 3/8] net/cpfl: set up rte flow skeleton
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 2/8] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 4/8] net/cpfl: set up control path Zhang, Yuying
                           ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.
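
As an illustration only (not part of this patch), the sketch below shows
how a backend engine is expected to plug into this skeleton; the dummy
engine and its callback are hypothetical, the real FXP engine is added
later in this series.

#include <rte_common.h>

#include "cpfl_flow.h"

/* Hypothetical no-op engine used only to show the registration flow. */
static int
dummy_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static struct cpfl_flow_engine dummy_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = dummy_engine_init,
};

/* Constructor time: add the engine to the global list walked by
 * cpfl_flow_engine_init() and cpfl_flow_engine_match().
 */
RTE_INIT(cpfl_dummy_engine_register)
{
	cpfl_flow_engine_register(&dummy_engine);
}

During adapter init, cpfl_flow_init() (only entered when the flow_parser
devarg provides a JSON configuration file) calls each registered engine's
init callback and then creates the JSON parser.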

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 4/8] net/cpfl: set up control path
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (2 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 3/8] net/cpfl: set up rte flow skeleton Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 5/8] net/cpfl: add FXP low level implementation Zhang, Yuying
                           ` (4 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up control vport and control queue for flow offloading.
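
For illustration only (not part of this patch), a sketch of creating one
config TX control queue with cpfl_ctlq_add(). The DMA regions and register
offsets are assumed to have already been obtained from the Control Plane;
cfgq_id, cp_ring_mem and cp_buf_mem are placeholders.

#include "cpfl_controlq.h"

static int
cpfl_cfgq_tx_add_example(struct idpf_hw *hw, int cfgq_id,
			 struct idpf_dma_mem cp_ring_mem,
			 struct idpf_dma_mem cp_buf_mem,
			 struct idpf_ctlq_info **cfg_txq)
{
	struct cpfl_ctlq_create_info qinfo = { 0 };

	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_TX;
	qinfo.id = cfgq_id;
	qinfo.len = CPFL_CFGQ_RING_LEN;
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	/* ring_mem must be CPFL_CFGQ_RING_LEN * sizeof(struct idpf_ctlq_desc)
	 * bytes; buffer checks are skipped for TX queues.
	 */
	qinfo.ring_mem = cp_ring_mem;
	qinfo.buf_mem = cp_buf_mem;
	/* qinfo.reg.* (head/tail/len/bah/bal and masks) must also be filled
	 * from the queue registers reported by the Control Plane.
	 */
	return cpfl_ctlq_add(hw, &qinfo, cfg_txq);
}

On success the new idpf_ctlq_info is linked into hw->cq_list_head and
returned through cfg_txq; messages are then sent with cpfl_ctlq_send()
and completions reclaimed with cpfl_ctlq_clean_sq().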

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 803 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1307 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..ed76282b0c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that DMA parameter of each DMA memory struct is present and
+ * consistent with control queue parameters
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init. The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block is broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of the ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
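+
+/* Illustrative polling sketch (names are placeholders; error handling
+ * trimmed): receive on a config Rx queue, then hand the consumed payload
+ * buffers back with cpfl_ctlq_post_rx_buffs(), which is called after every
+ * receive even when no buffer is returned.
+ *
+ *	struct idpf_ctlq_msg msg[4];
+ *	struct idpf_dma_mem *bufs[4];
+ *	uint16_t nb_msg = 4, nb_buf = 0, i;
+ *
+ *	if (!cpfl_ctlq_recv(rx_cq, &nb_msg, msg)) {
+ *		for (i = 0; i < nb_msg; i++)
+ *			if (msg[i].data_len)
+ *				bufs[nb_buf++] = msg[i].ctx.indirect.payload;
+ *	}
+ *	cpfl_ctlq_post_rx_buffs(hw, rx_cq, &nb_buf, bufs);
+ */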
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
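+	/* Config queues are created in Tx/Rx pairs: even indices are Tx
+	 * config queues and odd indices are the matching Rx config queues.
+	 */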
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 5/8] net/cpfl: add FXP low level implementation
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (3 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 4/8] net/cpfl: set up control path Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 6/8] net/cpfl: add fxp rule module Zhang, Yuying
                           ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add the FXP low-level implementation for the CPFL rte_flow engine
to create and delete rules.
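
For illustration, the action factory helpers added below can be composed
into a rule's action sets. A minimal sketch (precedence and the VSI ID are
placeholder values, not taken from a real configuration):

	/* forward matched packets to a VSI */
	union cpfl_action_set fwd = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, vsi_id);
	/* or drop them */
	union cpfl_action_set drop = cpfl_act_drop(1);

Each helper validates its arguments and returns a NOP action set on invalid
input, which callers can detect with cpfl_is_nop_action().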

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 126 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1291 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base actions
+ * once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
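+
+/* Illustrative pairing (placeholder precedence values): a classification
+ * block that wants its DROP action to take effect can emit both a DROP
+ * action and an ACT_COMMIT action with CPFL_ACT_COMMIT_ALL, per the
+ * DROP/ACT_COMMIT interaction described above, e.g.
+ *
+ *	acts[0] = cpfl_act_drop(2);
+ *	acts[1] = cpfl_act_set_commit_mode(1, CPFL_ACT_COMMIT_ALL);
+ */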
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint to whether
+ * or not an accompanied MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select the one with the higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two
+ * consecutive action sets: the chained (AUX) action set comes first and the
+ * base/parent action set comes second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the members
+ * are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+	'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 6/8] net/cpfl: add fxp rule module
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (4 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 5/8] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 7/8] net/cpfl: add fxp flow engine Zhang, Yuying
                           ` (2 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add a low level fxp module for rule packing, creation and destruction.

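A minimal usage sketch of the new module (illustrative only and not part
of this patch; the helper name and its parameters are assumptions, the
control queue pair is whatever the caller has configured, and fields such
as the cookie, host/port ids and resp_req still need to be filled in):

static int
cpfl_fxp_sem_rule_example(struct cpfl_itf *itf,
			  struct idpf_ctlq_info *tx_cq,
			  struct idpf_ctlq_info *rx_cq,
			  uint16_t prof_id,
			  const uint8_t *key, uint8_t key_len,
			  const uint8_t *acts, uint8_t act_len)
{
	struct cpfl_rule_info rinfo = {0};
	int ret;

	/* describe one SEM rule: profile, key and packed actions */
	rinfo.type = CPFL_RULE_TYPE_SEM;
	rinfo.sem.prof_id = prof_id;
	rinfo.sem.key_byte_len = key_len;
	memcpy(rinfo.sem.key, key, key_len);
	rinfo.act_byte_len = act_len;
	memcpy(rinfo.act_bytes, acts, act_len);

	/* pack the rule into a ctlq message, post it on the tx control
	 * queue and poll the rx control queue for the completion
	 */
	ret = cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
	if (ret)
		return ret;

	/* the same call with add == false deletes the rule again */
	return cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, false);
}
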
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 402 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..da78e79652 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 7/8] net/cpfl: add fxp flow engine
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (5 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 6/8] net/cpfl: add fxp rule module Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-08-22  1:02         ` [PATCH v6 8/8] net/cpfl: add flow support for representor Zhang, Yuying
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt the fxp low level module as a flow engine.

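From the application side the engine is exercised through the standard
rte_flow API; below is a minimal sketch of a rule this engine can
translate (the port id and queue index are placeholders, and the actual
pattern/action support depends on the JSON parser configuration in use):

	uint16_t port_id = 0;	/* placeholder */
	struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err = {0};
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		printf("flow create failed: %s\n",
		       err.message ? err.message : "(no message)");
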
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 611 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data has not been allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e0c08a77c3
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represent a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v6 8/8] net/cpfl: add flow support for representor
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (6 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 7/8] net/cpfl: add fxp flow engine Zhang, Yuying
@ 2023-08-22  1:02         ` Zhang, Yuying
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-08-22  1:02 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for representor, so that the representor can
create, destroy, validate and flush rules.

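On the API level the testpmd example added to the documentation below
corresponds to an action list like the following sketch (the
representor's ethdev port id is a placeholder):

	uint16_t repr_port_id = 0;	/* placeholder */
	struct rte_flow_action_ethdev repr_act = { .port_id = repr_port_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
		  .conf = &repr_act },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
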
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 13 ++++
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 90 ++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 15680a10a6..6bfcc7137a 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -194,3 +194,16 @@ low level hardware resources.
    .. code-block:: console
 
    dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from the I/O port to a local (CPF's) vport::
+
+   .. code-block:: console
+
+   flow create 0 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id 0 / end
+
+#. Send the packet, and it should be displayed on PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="enp24s0f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 3d9be208d0..bad71ad3fd 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -81,6 +81,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index e0c08a77c3..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
@@ -414,6 +434,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,7 +508,13 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
-	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
 		return -EINVAL;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 1/5] net/cpfl: setup rte flow skeleton
  2023-08-12  7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
@ 2023-08-25  3:55   ` Xing, Beilei
  0 siblings, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  3:55 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 1/5] net/cpfl: setup rte flow skeleton
> 
> Setup the rte_flow backend skeleton. Introduce the framework to support
> different engines as rte_flow backend. Bridge rte_flow driver API to flow
> engines.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c |  54 ++++++
>  drivers/net/cpfl/cpfl_ethdev.h |   5 +
>  drivers/net/cpfl/cpfl_flow.c   | 331 +++++++++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow.h   |  88 +++++++++
>  drivers/net/cpfl/meson.build   |   3 +-
>  5 files changed, 480 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/cpfl/cpfl_flow.c  create mode 100644 drivers/net/cpfl/cpfl_flow.h
> 
<...>
> 
> +static int
> +cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
> +		      const struct rte_flow_ops **ops) {
> +	struct cpfl_itf *itf;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	itf = CPFL_DEV_TO_ITF(dev);
> +
> +	/* only vport support rte_flow */
> +	if (itf->type != CPFL_ITF_TYPE_VPORT)
> +		return -ENOTSUP;

Do we need this check? It seems this function is only used for vports, not for representors.

> +#ifdef CPFL_FLOW_JSON_SUPPORT
> +	*ops = &cpfl_flow_ops;
> +#else
> +	*ops = NULL;
> +	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c
> +library."); #endif
> +	return 0;
> +}
> +
<...>
> +
> +static int
> +cpfl_flow_valid_attr(const struct rte_flow_attr *attr,
> +		     struct rte_flow_error *error)

Better to use cpfl_flow_attr_valid to align with cpfl_flow_param_valid.

> +{
> +	if (attr->priority > 6) {

What does 6 mean? Better to define a macro to describe it.
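A named macro would make the limit self-describing, e.g. (the macro name here is only a suggestion):

	#define CPFL_MAX_FLOW_PRIORITY	6

	if (attr->priority > CPFL_MAX_FLOW_PRIORITY) {
		...
	}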

> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> +				   attr, "Only support priority 0-6.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
<...>
> +struct rte_flow *
> +cpfl_flow_create(struct rte_eth_dev *dev __rte_unused,
> +		 const struct rte_flow_attr *attr __rte_unused,
> +		 const struct rte_flow_item pattern[] __rte_unused,
> +		 const struct rte_flow_action actions[] __rte_unused,
> +		 struct rte_flow_error *error __rte_unused) {
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_flow_engine *engine;
> +	struct rte_flow *flow;
> +	void *meta;
> +	int ret;
> +
> +	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
> +	if (!flow) {
> +		rte_flow_error_set(error, ENOMEM,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to allocate memory");
> +		return NULL;
> +	}
> +
> +	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
> +	if (ret) {
> +		rte_free(flow);
> +		return NULL;
> +	}
> +
> +	engine = cpfl_flow_engine_match(dev, attr, pattern, actions, &meta);
> +	if (!engine) {
> +		rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				   NULL, "No matched engine");
> +		rte_free(flow);
> +		return NULL;
> +	}

cpfl_flow_param_valid and cpfl_flow_engine_match can be replaced with cpfl_flow_validate function.

> +
> +	if (!engine->create) {
> +		rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +				   NULL, "No matched flow creation function");
> +		rte_free(flow);
> +		return NULL;
> +	}
> +
> +	ret = engine->create(dev, flow, meta, error);
> +	if (ret) {
> +		rte_free(flow);
> +		return NULL;
> +	}
> +
> +	flow->engine = engine;
> +	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
> +
> +	return flow;
> +}
> +

<...>

> +
> +int
> +cpfl_flow_query(struct rte_eth_dev *dev __rte_unused,
> +		struct rte_flow *flow __rte_unused,
> +		const struct rte_flow_action *actions __rte_unused,
> +		void *data __rte_unused,
> +		struct rte_flow_error *error __rte_unused) {

Why is __rte_unused used here?
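If the parameters are actually used in the body, the __rte_unused attribute should simply be dropped, e.g.:

	int
	cpfl_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
			const struct rte_flow_action *actions, void *data,
			struct rte_flow_error *error)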

> +	struct rte_flow_query_count *count = data;
> +	int ret = -EINVAL;
> +
> +	if (!flow || !flow->engine || !flow->engine->query_count) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE,
> +				   NULL, "Invalid flow");
> +		return -rte_errno;
> +	}
> +
> +	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> +		switch (actions->type) {
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_COUNT:
> +			ret = flow->engine->query_count(dev, flow, count,
> error);
> +			break;
> +		default:
> +			ret = rte_flow_error_set(error, ENOTSUP,
> +
> RTE_FLOW_ERROR_TYPE_ACTION,
> +						 actions,
> +						 "action not supported");
> +			break;
> +		}
> +	}
> +
> +	return ret;
> +}
> +

<...>
> +void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
> +
> +struct cpfl_flow_engine *
> +cpfl_flow_engine_match(struct rte_eth_dev *dev,
> +		       const struct rte_flow_attr *attr,
> +		       const struct rte_flow_item pattern[],
> +		       const struct rte_flow_action actions[],
> +		       void **meta);
> +int
> +cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter); void
> +cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
> +
> +int cpfl_flow_init(struct cpfl_adapter_ext *ad); void
> +cpfl_flow_uninit(struct cpfl_adapter_ext *ad); struct rte_flow
> +*cpfl_flow_create(struct rte_eth_dev *dev,
> +				  const struct rte_flow_attr *attr,
> +				  const struct rte_flow_item pattern[],
> +				  const struct rte_flow_action actions[],
> +				  struct rte_flow_error *error);
> +int cpfl_flow_validate(struct rte_eth_dev *dev,
> +		       const struct rte_flow_attr *attr,
> +		       const struct rte_flow_item pattern[],
> +		       const struct rte_flow_action actions[],
> +		       struct rte_flow_error *error); int cpfl_flow_destroy(struct
> +rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
> +int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error
> +*error); int cpfl_flow_query(struct rte_eth_dev *dev,
> +		    struct rte_flow *flow,
> +		    const struct rte_flow_action *actions,
> +		    void *data,
> +		    struct rte_flow_error *error);

Please check all the function declarations; there is no need for a new line between the return type and the function name in a declaration.
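i.e. keep the return type and the function name on one line for the declarations, e.g.:

	int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
	void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
	int cpfl_flow_init(struct cpfl_adapter_ext *ad);
	void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);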

> +#endif
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index
> 84ba994469..222497f7c2 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -42,10 +42,11 @@ endif
>  js_dep = dependency('json-c', required: false, method : 'pkg-config')  if
> js_dep.found()
>      sources += files(
> +        'cpfl_flow.c',
>          'cpfl_flow_parser.c',
>          'cpfl_rules.c',
>          'cpfl_controlq.c',
>      )
>      dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
>      ext_deps += js_dep
> -endif
> \ No newline at end of file
> +endif
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure
  2023-08-12  7:55 ` [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Yuying Zhang
@ 2023-08-25  5:55   ` Xing, Beilei
  0 siblings, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  5:55 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure
> 
> Add cfg data in idpf_ctlq_msg.

Could you add more detail to the commit log to describe why we need this field?

> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
>  drivers/common/idpf/base/idpf_controlq_api.h | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/common/idpf/base/idpf_controlq_api.h
> b/drivers/common/idpf/base/idpf_controlq_api.h
> index 3780304256..b38b10465c 100644
> --- a/drivers/common/idpf/base/idpf_controlq_api.h
> +++ b/drivers/common/idpf/base/idpf_controlq_api.h
> @@ -65,6 +65,9 @@ struct idpf_ctlq_msg {
>  			u32 chnl_opcode;
>  			u32 chnl_retval;
>  		} mbx;
> +		struct {
> +			u64 data;
> +		} cfg;
>  	} cookie;
>  	union {
>  #define IDPF_DIRECT_CTX_SIZE	16
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle
  2023-08-12  7:55 ` [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Yuying Zhang
@ 2023-08-25  6:23   ` Xing, Beilei
  0 siblings, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  6:23 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle
> 
> Add cpfl driver control queue message handle, including
> send/receive/clean/post_rx_buffs.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>

It seems all these functions are similar to the functions in the idpf shared code.
Can we use idpf_ctlq_xxx directly?
BTW, the new field added in the 2nd patch is not used here, so is the new 'data' field necessary?

^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 4/5] net/cpfl: add fxp rule module
  2023-08-12  7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
@ 2023-08-25  7:35   ` Xing, Beilei
  2023-08-25  8:42   ` Xing, Beilei
  1 sibling, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  7:35 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
> 
> Added low level fxp module for rule packing / creation / destroying.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h   |   4 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h |  87 ++++++++++
>  drivers/net/cpfl/meson.build     |   1 +
>  4 files changed, 380 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index
> c71f16ac60..63bcc5551f 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -145,10 +145,14 @@ enum cpfl_itf_type {
> 
>  TAILQ_HEAD(cpfl_flow_list, rte_flow);
> 
> +#define CPFL_FLOW_BATCH_SIZE  490
>  struct cpfl_itf {
>  	enum cpfl_itf_type type;
>  	struct cpfl_adapter_ext *adapter;
>  	struct cpfl_flow_list flow_list;
> +	struct idpf_dma_mem flow_dma;
> +	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> +	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
>  	void *data;
>  };
> 
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..936f57e4fa
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,288 @@

<...>

> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t
> num_q_msg,
> +		      struct idpf_ctlq_msg q_msg[])
> +{
> +	int retries = 0;
> +	struct idpf_dma_mem *dma;
> +	uint16_t i;
> +	uint16_t buff_cnt;
> +	int ret = 0;
> +
> +	retries = 0;
> +	while (retries <= CTLQ_RECEIVE_RETRIES) {
> +		rte_delay_us_sleep(10);
> +		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
> +
> +		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
> +		    ret != CPFL_ERR_CTLQ_ERROR) {
> +			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err:
> 0x%4x\n", ret);
> +			retries++;
> +			continue;
> +		}
> +
> +		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
> +			retries++;
> +			continue;
> +		}
> +
> +		if (ret == CPFL_ERR_CTLQ_EMPTY)
> +			break;
> +
> +		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
> +		if (ret) {
> +			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq
> msg");
> +			break;

Don't break here; the buffer still needs to be posted back to the receive ring.
Please check the internal fix patch.

> +		}
> +
> +		for (i = 0; i < num_q_msg; i++) {
> +			if (q_msg[i].data_len > 0)
> +				dma = q_msg[i].ctx.indirect.payload;
> +			else
> +				dma = NULL;
> +
> +			buff_cnt = dma ? 1 : 0;
> +			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt,
> &dma);
> +			if (ret)
> +				PMD_INIT_LOG(WARNING, "could not posted
> recv bufs\n");
> +		}
> +		break;
> +	}
> +
> +	if (retries > CTLQ_RECEIVE_RETRIES) {
> +		PMD_INIT_LOG(ERR, "timed out while polling for receive
> response");
> +		ret = -1;
> +	}
> +
> +	return ret;
> +}
> +
> +static int
> +pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,

Please follow the function naming style; how about cpfl_mod_rule_pack?

> +	      struct idpf_ctlq_msg *msg)

<...>
> +
> +static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem

static int
cpfl_default_rule_pack

> *dma,
> +			     struct idpf_ctlq_msg *msg, bool add) {
<...>
> +
> +static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,

static int
cpfl_rule_pack
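i.e. keep the signatures as they are in the patch and only rename to follow the cpfl_ prefix style, e.g.:

	static int
	cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
			   struct idpf_ctlq_msg *msg);
	static int
	cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
			       struct idpf_ctlq_msg *msg, bool add);
	static int
	cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		       struct idpf_ctlq_msg *msg, bool add);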

> +		     struct idpf_ctlq_msg *msg, bool add) {
> +	int ret = 0;
> +
> +	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> +		if (pack_default_rule(rinfo, dma, msg, add) < 0)
> +			ret = -1;
> +	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
> +		if (pack_mod_rule(rinfo, dma, msg) < 0)
> +			ret = -1;
> +	}

Do we need to check for an invalid rinfo->type here, e.g. CPFL_RULE_TYPE_LEM?
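A minimal sketch of the missing default branch (the log text is only a suggestion):

	} else {
		PMD_INIT_LOG(ERR, "Unsupported rule type %d", rinfo->type);
		ret = -1;
	}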

> +
> +	return ret;
> +}
> +
> +int
> +cpfl_rule_update(struct cpfl_itf *itf,
> +		 struct idpf_ctlq_info *tx_cq,
> +		 struct idpf_ctlq_info *rx_cq,
> +		 struct cpfl_rule_info *rinfo,
> +		 int rule_num,
> +		 bool add)
> +{
> +	struct idpf_hw *hw = &itf->adapter->base.hw;
> +	int i;
> +	int ret = 0;
> +
> +	if (rule_num == 0)
> +		return 0;
> +
> +	for (i = 0; i < rule_num; i++) {
> +		ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "Could not create rule");

Could not pack rule?

> +			return ret;
> +		}
> +	}
> +	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to send rule");
> +		return ret;
> +	}
> +	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to create rule");

Is this function for updating a rule or for creating a rule?
The function name is rule_update, but it seems to create a rule.

> +		return ret;
> +	}
> +
> +	return 0;
> +}
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
> new file mode 100644
> index 0000000000..68efa8e3f8
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.h
> @@ -0,0 +1,87 @@

<...>
> +
> +int cpfl_rule_update(struct cpfl_itf *itf,
> +		     struct idpf_ctlq_info *tx_cq,
> +		     struct idpf_ctlq_info *rx_cq,
> +		     struct cpfl_rule_info *rinfo,
> +		     int rule_num,
> +		     bool add);
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t
> num_q_msg,
> +		   struct idpf_ctlq_msg q_msg[]);
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t
> num_q_msg,

No need for a new line here.

> +		      struct idpf_ctlq_msg q_msg[]);
> +#endif /*CPFL_FXP_RULE_H*/
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index
> 222497f7c2..4061123034 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -46,6 +46,7 @@ if js_dep.found()
>          'cpfl_flow_parser.c',
>          'cpfl_rules.c',
>          'cpfl_controlq.c',
> +	'cpfl_fxp_rule.c',
>      )
>      dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
>      ext_deps += js_dep
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 4/5] net/cpfl: add fxp rule module
  2023-08-12  7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
  2023-08-25  7:35   ` Xing, Beilei
@ 2023-08-25  8:42   ` Xing, Beilei
  1 sibling, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  8:42 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
> 
> Added low level fxp module for rule packing / creation / destroying.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h   |   4 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h |  87 ++++++++++
>  drivers/net/cpfl/meson.build     |   1 +
>  4 files changed, 380 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index c71f16ac60..63bcc5551f 100644

<...>
> +struct cpfl_lem_rule_info {
> +	uint16_t prof_id;
> +	uint8_t key[CPFL_MAX_KEY_LEN];
> +	uint8_t key_byte_len;
> +	uint8_t pin_to_cache;
> +	uint8_t fixed_fetch;
> +};

Remove the LEM-related structures and members below.
 
> +#define CPFL_MAX_MOD_CONTENT_LEN 256
> +struct cpfl_mod_rule_info {
> +	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
> +	uint8_t mod_content_byte_len;
> +	uint32_t mod_index;
> +	uint8_t pin_mod_content;
> +	uint8_t mod_obj_size;
> +};
> +
> +enum cpfl_rule_type {
> +	CPFL_RULE_TYPE_NONE,
> +	CPFL_RULE_TYPE_SEM,
> +	CPFL_RULE_TYPE_LEM,
> +	CPFL_RULE_TYPE_MOD
> +};
> +
> +struct cpfl_rule_info {
> +	enum cpfl_rule_type type;
> +	uint64_t cookie;
> +	uint8_t host_id;
> +	uint8_t port_num;
> +	uint8_t resp_req;
> +	/* TODO: change this to be dynamically allocated/reallocated */
> +	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union
> cpfl_action_set)];
> +	uint8_t act_byte_len;
> +	/* vsi is used for lem and lpm rules */
> +	uint16_t vsi;
> +	uint8_t clear_mirror_1st_state;
> +	/* mod related fields */
> +	union {
> +		struct cpfl_mod_rule_info mod;
> +		struct cpfl_sem_rule_info sem;
> +		struct cpfl_lem_rule_info lem;
> +	};
> +};
> +
> +struct cpfl_meter_action_info {
> +	uint8_t meter_logic_bank_id;
> +	uint32_t meter_logic_idx;
> +	uint8_t prof_id;
> +	uint8_t slot;
> +};

Remove the meter-related structures.



^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v1 5/5] net/cpfl: add fxp flow engine
  2023-08-12  7:55 ` [PATCH v1 5/5] net/cpfl: add fxp flow engine Yuying Zhang
@ 2023-08-25  9:15   ` Xing, Beilei
  0 siblings, 0 replies; 128+ messages in thread
From: Xing, Beilei @ 2023-08-25  9:15 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 5/5] net/cpfl: add fxp flow engine
> 
> Adapt fxp low level as a flow engine.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h          |  85 ++++
>  drivers/net/cpfl/cpfl_flow_engine_fxp.c | 610 ++++++++++++++++++++++++
>  drivers/net/cpfl/meson.build            |   1 +
>  3 files changed, 696 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 63bcc5551f..d7e9ea1a74 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -92,6 +92,8 @@
<...>
> +static inline uint16_t
> +cpfl_get_vsi_id(struct cpfl_itf *itf)
> +{
> +	struct cpfl_adapter_ext *adapter = itf->adapter;
> +	struct cpfl_vport_info *info;
> +	uint32_t vport_id;
> +	int ret;
> +	struct cpfl_vport_id vport_identity;
> +
> +	if (!itf)
> +		return CPFL_INVALID_HW_ID;
> +
> +	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
> +		struct cpfl_repr *repr = (void *)itf;
> +
> +		return repr->vport_info->vport_info.vsi_id;
> +	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
> +		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
> +		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
> +		vport_identity.pf_id = ACC_CPF_ID;
> +		vport_identity.vf_id = 0;
> +		vport_identity.vport_id = vport_id;
> +
> +		ret = rte_hash_lookup_data(adapter->vport_map_hash,
> &vport_identity,
> +					  (void **)&info);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "vport id not exist");
> +			goto err;
> +		}
> +
> +		/* rte_spinlock_unlock(&adapter->vport_map_lock); */
 
So do we need the lock in this function?

> +		return info->vport_info.vsi_id;
> +	}
> +
> +err:
> +	/* rte_spinlock_unlock(&adapter->vport_map_lock); */
> +	return CPFL_INVALID_HW_ID;
> +}
> +
<...>
> 
>  #endif /* _CPFL_ETHDEV_H_ */
> diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
> b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
> new file mode 100644
> index 0000000000..e10639c842
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
> @@ -0,0 +1,610 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +#include <math.h>
> +#include <rte_debug.h>
> +#include <rte_ether.h>
> +#include <ethdev_driver.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_eth_ctrl.h>
> +#include <rte_tailq.h>
> +#include <rte_flow_driver.h>
> +#include <rte_flow.h>
> +#include <rte_bitmap.h>
> +#include "cpfl_rules.h"
> +#include "cpfl_logs.h"
> +#include "cpfl_ethdev.h"
> +#include "cpfl_flow.h"
> +#include "cpfl_fxp_rule.h"
> +#include "cpfl_flow_parser.h"
> +#include "rte_memcpy.h"

Use #include <rte_memcpy.h> and move it up with the other includes?

> +
> +#define COOKIE_DEF	0x1000
> +#define PREC_MAX	7
> +#define PREC_DEF	1
> +#define PREC_SET	5
> +#define TYPE_ID		3
> +#define OFFSET		0x0a
> +#define HOST_ID_DEF	0
> +#define PF_NUM_DEF	0
> +#define PORT_NUM_DEF	0
> +#define RESP_REQ_DEF	2
> +#define PIN_TO_CACHE_DEF	0
> +#define CLEAR_MIRROR_1ST_STATE_DEF  0
> +#define FIXED_FETCH_DEF 0
> +#define PTI_DEF		0
> +#define MOD_OBJ_SIZE_DEF	0
> +#define PIN_MOD_CONTENT_DEF	0
> +
> +#define MAX_MOD_CONTENT_INDEX	256
> +#define MAX_MR_ACTION_NUM 8

For the newly defined macros in the PMD, better to use the CPFL_ prefix.
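e.g. for the defines above (values unchanged):

	#define CPFL_COOKIE_DEF		0x1000
	#define CPFL_PREC_MAX		7
	#define CPFL_PREC_DEF		1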

> +
> +struct rule_info_meta {

cpfl_rule_info_meta.
Please check all the other macros, global variables, structures, functions, etc.; I will not comment on each of those.

BTW, could you add some comments for the new structures and their members? That would make them more readable.

> +	struct cpfl_flow_pr_action pr_action;
> +	uint32_t pr_num;
> +	uint32_t mr_num;
> +	uint32_t rule_num;
> +	struct cpfl_rule_info rules[0];
> +};
> +
> +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad); static
> +void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
> +uint64_t rule_cookie = COOKIE_DEF;
> +
> +static int
> +cpfl_fxp_create(struct rte_eth_dev *dev,
> +		struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	uint32_t cpq_id = 0;
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_adapter_ext *ad = itf->adapter;
> +	struct rule_info_meta *rim = meta;
> +	struct cpfl_vport *vport;
> +
> +	if (!rim)
> +		return ret;
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		cpq_id = vport->base.devarg_id * 2;

Why is vport->base.devarg_id * 2 here? Could you add some comments?

> +	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {

So does the patch support both representor rules and the represented port action?
It's better to split the VPORT and REPRESENTOR support.

> +		cpq_id = CPFL_FPCP_CFGQ_TX;
> +	} else {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to find correct control queue");
> +		return -rte_errno;
> +	}
> +
> +	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
> +			       rim->rules, rim->rule_num, true);

OK, I understand the function is there to process the rule, right?
So how about cpfl_rule_process?

> +	if (ret < 0) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "cpfl filter create flow fail");
> +		rte_free(rim);
> +		return ret;
> +	}
> +
> +	flow->rule = rim;
> +
> +	return ret;
> +}
> +
> +static inline void
> +cpfl_fxp_rule_free(struct rte_flow *flow) {
> +	rte_free(flow->rule);
> +	flow->rule = NULL;
> +}
> +
> +static int
> +cpfl_fxp_destroy(struct rte_eth_dev *dev,
> +		 struct rte_flow *flow,
> +		 struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	uint32_t cpq_id = 0;
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_adapter_ext *ad = itf->adapter;
> +	struct rule_info_meta *rim;
> +	uint32_t i;
> +	struct cpfl_vport *vport;
> +
> +	rim = flow->rule;
> +	if (!rim) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "no such flow create by cpfl filter");
> +
> +		cpfl_fxp_rule_free(flow);

flow->rule is NULL, so no need to call the function.

> +
> +		return -rte_errno;
> +	}
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		cpq_id = vport->base.devarg_id * 2;
> +	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
> +		cpq_id = CPFL_FPCP_CFGQ_TX;
> +	} else {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to find correct control queue");

Need to goto err here?

> +		return -rte_errno;
> +	}
> +
> +	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
> rim->rules,
> +			       rim->rule_num, false);
> +	if (ret < 0) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to destroy cpfl filter rule");
> +		goto err;
> +	}
> +
> +	/* free mod index */
> +	for (i = rim->pr_num; i < rim->rule_num; i++)
> +		fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
> +err:
> +	cpfl_fxp_rule_free(flow);
> +	return ret;
> +}
> +

<...>
> +
> +static int
> +cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
> +		       struct cpfl_rule_info *match_rinfo,
> +		       struct cpfl_rule_info *mod_rinfo,
> +		       const struct cpfl_flow_mr_action *mr_action) {
> +	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
> +	uint32_t mod_idx;
> +	int i;
> +	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
> +	union cpfl_action_set *act_set =
> +		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
> +
> +	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
> +		return -EINVAL;
> +
> +	*act_set = cpfl_act_mod_profile(PREC_DEF,
> +					mr_action->mod.prof,
> +					PTI_DEF,
> +					0, /* append */
> +					0, /* prepend */
> +
> 	CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
> +
> +	act_set++;
> +	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +
> +	mod_idx = fxp_mod_idx_alloc(adapter);
> +	if (mod_idx == MAX_MOD_CONTENT_INDEX) {
> +		PMD_DRV_LOG(ERR, "Out of Mod Index.");
> +		return -ENOMEM;
> +	}
> +
> +	*act_set = cpfl_act_mod_addr(PREC_DEF, mod_idx);
> +
> +	act_set++;
> +	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +
> +	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
> +	minfo->mod_obj_size = MOD_OBJ_SIZE_DEF;
> +	minfo->pin_mod_content = PIN_MOD_CONTENT_DEF;
> +	minfo->mod_index = mod_idx;
> +	mod_rinfo->cookie = 0x1237561;

How about adding a macro for 0x1237561?
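e.g. (the macro name is only a suggestion):

	#define CPFL_MOD_RULE_COOKIE_DEF	0x1237561
	...
	mod_rinfo->cookie = CPFL_MOD_RULE_COOKIE_DEF;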

> +	mod_rinfo->port_num = PORT_NUM_DEF;
> +	mod_rinfo->resp_req = RESP_REQ_DEF;
> +
> +	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
> +	for (i = 0; i < minfo->mod_content_byte_len; i++)
> +		minfo->mod_content[i] = mr_action->mod.data[i];
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_fxp_parse_action(struct cpfl_itf *itf,
> +		      const struct rte_flow_action *actions,
> +		      const struct cpfl_flow_mr_action *mr_action,
> +		      struct rule_info_meta *rim,
> +		      int priority,
> +		      int index,
> +		      bool is_vport_rule)
> +{
> +	const struct rte_flow_action_ethdev *act_ethdev;
> +	const struct rte_flow_action *action;
> +	const struct rte_flow_action_queue *act_q;
> +	const struct rte_flow_action_rss *rss;
> +	struct rte_eth_dev_data *data;
> +	enum rte_flow_action_type action_type;
> +	struct cpfl_vport *vport;
> +	/* used when action is REPRESENTED_PORT or REPRESENTED_PORT
> type */

Represented port or port representor?
Also, can we split the VPORT and REPRESENTOR flow support?

> +	struct cpfl_itf *dst_itf;
> +	uint16_t dev_id; /*vsi_id or phyical port id*/
> +	bool is_vsi;
> +	bool set_meta_valid = false;
> +	int queue_id = -1;
> +	bool fwd_vsi = false;
> +	bool fwd_q = false;
> +	bool fwd_jump = false;
> +	uint32_t i;
> +	struct cpfl_rule_info *rinfo = &rim->rules[index];
> +	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
> +
> +	priority = PREC_MAX - priority;
> +	for (action = actions; action->type !=
> +			RTE_FLOW_ACTION_TYPE_END; action++) {
> +		action_type = action->type;
> +		switch (action_type) {
> +		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
> +		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
> +			if (!fwd_vsi && !fwd_jump)
> +				fwd_vsi = true;
> +			else
> +				goto err;
> +			if (is_vport_rule) {
> +				dst_itf = itf;
> +			} else {
> +				act_ethdev = action->conf;
> +				dst_itf = cpfl_get_itf_by_port_id(act_ethdev-
> >port_id);
> +			}
> +
> +			if (!dst_itf)
> +				goto err;
> +
> +			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
> +				vport = (struct cpfl_vport *)dst_itf;
> +				queue_id = vport-
> >base.chunks_info.rx_start_qid;
> +			} else {
> +				queue_id = -2;

Why is -2 used here?

> +			}
> +
> +			is_vsi = (action_type ==
> RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
> +				dst_itf->type ==
> CPFL_ITF_TYPE_REPRESENTOR);
> +			if (is_vsi || is_vport_rule)
> +				dev_id = cpfl_get_vsi_id(dst_itf);
> +			else
> +				dev_id = cpfl_get_port_id(dst_itf);
> +
> +			if (dev_id == CPFL_INVALID_HW_ID)
> +				goto err;
> +
> +			if (is_vsi || is_vport_rule)
> +				*act_set = cpfl_act_fwd_vsi(0, priority, 0,
> dev_id);
> +			else
> +				*act_set = cpfl_act_fwd_port(0, priority, 0,
> dev_id);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> +			if (!fwd_q && !fwd_jump)
> +				fwd_q = true;
> +			else
> +				goto err;
> +			if (queue_id == -2)
> +				goto err;
> +			act_q = action->conf;
> +			data = itf->data;
> +			if (act_q->index >= data->nb_rx_queues)
> +				goto err;
> +
> +			vport = (struct cpfl_vport *)itf;
> +			if (queue_id < 0)
> +				queue_id = vport-
> >base.chunks_info.rx_start_qid;
> +			queue_id += act_q->index;
> +			*act_set = cpfl_act_set_hash_queue(priority, 0,
> queue_id, 0);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_RSS:
> +			rss = action->conf;
> +			if (rss->queue_num <= 1)
> +				goto err;
> +			for (i = 0; i < rss->queue_num - 1; i++) {
> +				if (rss->queue[i + 1] != rss->queue[i] + 1)
> +					goto err;
> +			}
> +			data = itf->data;
> +			if (rss->queue[rss->queue_num - 1] >= data-
> >nb_rx_queues)
> +				goto err;
> +#define FXP_MAX_QREGION_SIZE 128
> +			if (!(rte_is_power_of_2(rss->queue_num) &&
> +			      rss->queue_num <= FXP_MAX_QREGION_SIZE))
> +				goto err;
> +
> +			if (!fwd_q && !fwd_jump)
> +				fwd_q = true;
> +			else
> +				goto err;
> +			if (queue_id == -2)
> +				goto err;
> +			vport = (struct cpfl_vport *)itf;
> +			if (queue_id < 0)
> +				queue_id = vport-
> >base.chunks_info.rx_start_qid;
> +			queue_id += rss->queue[0];
> +			*act_set = cpfl_act_set_hash_queue_region(priority, 0,
> queue_id,
> +								  log(rss-
> >queue_num) / log(2), 0);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_DROP:
> +			(*act_set).data = cpfl_act_drop(priority).data;
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			(*act_set).data = cpfl_act_set_commit_mode(priority,
> 0).data;
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +		default:
> +			goto err;
> +		}
> +	}
> +
> +	if (mr_action != NULL && !set_meta_valid) {
> +		uint32_t i;
> +
> +		for (i = 0; i < rim->mr_num; i++)
> +			if (cpfl_parse_mod_content(itf->adapter, rinfo,
> +						   &rim->rules[rim->pr_num +
> i],
> +						   &mr_action[i]))
> +				goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	PMD_DRV_LOG(ERR, "Invalid action type");
> +	return -EINVAL;
> +}
> +
<...>
> +
> +static int
> +cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
> +			      const struct rte_flow_attr *attr,
> +			      const struct rte_flow_item pattern[],
> +			      const struct rte_flow_action actions[],
> +			      void **meta)
> +{
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_flow_pr_action pr_action = { 0 };
> +	struct cpfl_adapter_ext *adapter = itf->adapter;
> +	struct cpfl_flow_mr_action mr_action[MAX_MR_ACTION_NUM] = { 0 };
> +	uint32_t pr_num = 0, mr_num = 0;
> +	struct cpfl_vport *vport;
> +	struct rule_info_meta *rim;
> +	bool set_meta_valid = false;
> +	int ret;
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		if (vport->exceptional) {

Exceptional vport support won't be in this release, so remove it.

> +			PMD_DRV_LOG(ERR, "Can't create rte_flow with
> exceptional vport.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr,
> &pr_action);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "No Match pattern support.");
> +		return -EINVAL;
> +	}
> +
> +	if (is_mod_action(actions, &set_meta_valid)) {
> +		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions,
> mr_action);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "action parse fails.");
> +			return -EINVAL;
> +		}
> +		if (!set_meta_valid)
> +			mr_num++;
> +	}
> +
> +	pr_num = 1;
> +	rim = rte_zmalloc(NULL,
> +			  sizeof(struct rule_info_meta) +
> +			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
> +			  0);
> +	if (!rim)
> +		return -ENOMEM;
> +
> +	rim->pr_action = pr_action;
> +	rim->pr_num = pr_num;
> +	rim->mr_num = mr_num;
> +	rim->rule_num = pr_num + mr_num;
> +
> +	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
> +		PMD_DRV_LOG(ERR, "Invalid input set");

Invalid pattern?

> +		rte_free(rim);
> +		return -rte_errno;
> +	}
> +
> +	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority,
> +				  0, false)) {
> +		PMD_DRV_LOG(ERR, "Invalid input set");

Invalid action?

> +		rte_free(rim);
> +		return -rte_errno;
> +	}
> +
> +	cpfl_fill_rinfo_default_value(&rim->rules[0]);
> +
> +	if (!meta)
> +		rte_free(rim);
> +	else
> +		*meta = rim;
> +
> +	return 0;
> +}
> +
> +static int fxp_mod_init(struct cpfl_adapter_ext *ad) {

Please check and refine these functions' coding style.

> +	uint32_t size =
> +rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX);
> +
> +	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> +
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	/* a set bit represent a free slot */
> +	ad->mod_bm =
> rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size);
> +	if (!ad->mod_bm) {
> +		rte_free(mem);
> +		return -EINVAL;
> +	}
> +
> +	ad->mod_bm_mem = mem;
> +
> +	return 0;
> +}
> +
> +static void fxp_mod_uninit(struct cpfl_adapter_ext *ad) {
> +	rte_free(ad->mod_bm_mem);
> +	ad->mod_bm_mem = NULL;
> +	ad->mod_bm = NULL;
> +}
> +
> +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad) {
> +	uint64_t slab = 0;
> +	uint32_t pos = 0;
> +
> +	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
> +		return MAX_MOD_CONTENT_INDEX;
> +
> +	pos += __builtin_ffsll(slab) - 1;
> +	rte_bitmap_clear(ad->mod_bm, pos);
> +
> +	return pos;
> +}
> +
> +static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
> +{
> +	rte_bitmap_set(ad->mod_bm, idx);
> +}
 
What's the benefit of the function? Can we call rte_bitmap_set directly?
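i.e. the caller in cpfl_fxp_destroy() could just do:

	for (i = rim->pr_num; i < rim->rule_num; i++)
		rte_bitmap_set(ad->mod_bm, rim->rules[i].mod.mod_index);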

> +
> +static int
> +cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
> +	       struct rte_flow *flow __rte_unused,
> +	       struct rte_flow_query_count *count __rte_unused,
> +	       struct rte_flow_error *error)
> +{
> +	rte_flow_error_set(error, EINVAL,
> +			   RTE_FLOW_ERROR_TYPE_HANDLE,
> +			   NULL,
> +			   "count action not supported by this module");
> +
> +	return -rte_errno;
> +}
> +
> +static void
> +cpfl_fxp_uninit(struct cpfl_adapter_ext *ad) {
> +	fxp_mod_uninit(ad);
> +}

Why do we need the function wrapper?

> +
> +static int
> +cpfl_fxp_init(struct cpfl_adapter_ext *ad) {
> +	int ret = 0;
> +
> +	ret = fxp_mod_init(ad);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
> +		return ret;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct
> +cpfl_flow_engine cpfl_fxp_engine = {
> +	.type = CPFL_FLOW_ENGINE_FXP,
> +	.init = cpfl_fxp_init,
> +	.uninit = cpfl_fxp_uninit,
> +	.create = cpfl_fxp_create,
> +	.destroy = cpfl_fxp_destroy,
> +	.query_count = cpfl_fxp_query,
> +	.parse_pattern_action = cpfl_fxp_parse_pattern_action, };
> +
> +RTE_INIT(cpfl_sw_engine_init)
> +{
> +	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
> +
> +	cpfl_flow_engine_register(engine);
> +}
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index
> 4061123034..ce46d7e76e 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -43,6 +43,7 @@ js_dep = dependency('json-c', required: false, method :
> 'pkg-config')  if js_dep.found()
>      sources += files(
>          'cpfl_flow.c',
> +	'cpfl_flow_engine_fxp.c',
>          'cpfl_flow_parser.c',
>          'cpfl_rules.c',
>          'cpfl_controlq.c',
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 0/8] add rte flow support for cpfl
  2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
                   ` (4 preceding siblings ...)
  2023-08-12  7:55 ` [PATCH v1 5/5] net/cpfl: add fxp flow engine Yuying Zhang
@ 2023-09-01 11:31 ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs Yuying Zhang
                     ` (8 more replies)
  5 siblings, 9 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Yuying Zhang

This patchset add rte_flow support for cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230816150541.144532-1-beilei.xing@intel.com/

Wenjing Qiao (4):
  net/cpfl: parse flow parser file in devargs
  net/cpfl: add flow json parser
  net/cpfl: add FXP low level implementation
  net/cpfl: setup ctrl path

Yuying Zhang (4):
  net/cpfl: set up rte flow skeleton
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor

 doc/guides/nics/cpfl.rst                |   45 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  803 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  390 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  109 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  603 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1769 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  220 +++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  297 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   14 +
 19 files changed, 6280 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 2/8] net/cpfl: add flow json parser Yuying Zhang
                     ` (7 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" for rte_flow json parser.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst       | 32 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c | 38 +++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  3 +++
 drivers/net/cpfl/meson.build   |  6 ++++++
 4 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index c20334230b..7032dd1a1a 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,24 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The PMD supports using a JSON file to parse rte_flow tokens into low level hardware
+  resources defined in a DDP package file.
+
+  The user can specify the path of json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load json file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+Rte flow need to install json-c library.
 
 Features
 --------
@@ -164,3 +176,23 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+Rte_flow uses a json file to direct CPF PMD to parse rte_flow tokens into
+low level hardware resources defined in a DDP package file.
+
+#. install json-c library::
+
+   .. code-block:: console
+
+   git clone https://github.com/json-c/json-c.git
+   cd json-c
+   git checkout 777dd06be83ef7fac71c2218b565557cd068a714
+
+#. run testpmd with the json file::
+
+   .. code-block:: console
+
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88c1479f3a..3c4a6a4724 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a891bd8df9..ed730cc0e9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -87,6 +87,8 @@
 #define ACC_LCE_ID	15
 #define IMC_MBX_EFD_ID	0
 
+#define CPFL_FLOW_FILE_LEN 100
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -100,6 +102,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index fb075c6860..0be25512c3 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,9 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+    ext_deps += js_dep
+endif
\ No newline at end of file
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 2/8] net/cpfl: add flow json parser
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 3/8] net/cpfl: add FXP low level implementation Yuying Zhang
                     ` (6 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

A JSON file will be used to direct DPDK CPF PMD to
parse rte_flow tokens into low level hardware resources
defined in a DDP package file.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 1769 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  220 ++++
 drivers/net/cpfl/meson.build        |    3 +
 3 files changed, 1992 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..f14ceefed6
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1769 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_object_get_string(subobject);
+}
+
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int64(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_object *cjson_pr_key_attr, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(cjson_pr_key_attr);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr_key_attr, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			return -EINVAL;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!cjson_field)
+		return 0;
+	len = json_object_array_length(cjson_field);
+	js_field->fields_size = len;
+	if (len == 0)
+		return 0;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name, *mask;
+
+		object = json_object_array_get_idx(cjson_field, i);
+		name = cpfl_json_object_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_object_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(cjson_pr_key_proto);
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_key_proto_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(cjson_pr_key_proto, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		cjson_pr_key_proto_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_object_to_string(cjson_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_fv);
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *cjson_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_fv, i);
+		js_fv = &js_act->sem.fv[i];
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		cjson_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(cjson_value);
+		}  else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(cjson_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *cjson_fv, *cjson_pr_action_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		cjson_pr_action_sem = json_object_object_get(cjson_per_act, "data");
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		cjson_fv = json_object_object_get(cjson_pr_action_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(cjson_pr_act);
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(cjson_pr_act, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_pr;
+	int i, len;
+
+	/* Pattern Rules */
+	cjson_pr = json_object_object_get(json_root, "patterns");
+	if (!cjson_pr) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(cjson_pr);
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_actions, *cjson_pr_key, *cjson_pr_key_proto,
+		    *cjson_pr_key_attr;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr, i);
+		/* pr->key */
+		cjson_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		cjson_pr_key_proto = json_object_object_get(cjson_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(cjson_pr_key_proto, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		cjson_pr_key_attr = json_object_object_get(cjson_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(cjson_pr_key_attr, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		cjson_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(cjson_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_mr_key);
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_mr_key_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(cjson_mr_key, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		cjson_mr_key_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *cjson_mr_key_proto;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			cjson_mr_key_proto = json_object_object_get(cjson_mr_key_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!cjson_mr_key_proto) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(cjson_mr_key_proto);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(cjson_mr_key_proto, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_object *cjson_layout, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_layout);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0, ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(cjson_layout, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *cjson_mr_action_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(cjson_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_object *layout;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(cjson_mr_action_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		layout = json_object_object_get(cjson_mr_action_data, "layout");
+		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "Unsupported type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_mr;
+	int i, len;
+
+	cjson_mr = json_object_object_get(json_root, "modifications");
+	if (!cjson_mr) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_object_array_length(cjson_mr);
+	parser->mr_size = len;
+	if (len == 0)
+		return 0;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *cjson_mr_key, *cjson_mr_action, *cjson_mr_key_action;
+
+		object = json_object_array_get_idx(cjson_mr, i);
+		/* mr->key */
+		cjson_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		cjson_mr_key_action = json_object_object_get(cjson_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		cjson_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+	ret = cpfl_flow_js_mod_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			return -EINVAL;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			return -EINVAL;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_flow_js_fv *js_fvs, int size, uint8_t *fv,
+			const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group % 10 == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_str2mac(const char *mask, uint8_t *addr_bytes)
+{
+	int i, size, j;
+	uint8_t n;
+
+	size = strlen(mask);
+	n = 0;
+	j = 0;
+	for (i = 0; i < size; i++) {
+		char ch = mask[i];
+
+		if (ch == ':') {
+			if (j >= RTE_ETHER_ADDR_LEN)
+				return -EINVAL;
+			addr_bytes[j++] = n;
+			n = 0;
+		} else if (ch >= 'a' && ch <= 'f') {
+			n = n * 16 + ch - 'a' + 10;
+		} else if (ch >= 'A' && ch <= 'F') {
+			n = n * 16 + ch - 'A' + 10;
+		} else if (ch >= '0' && ch <= '9') {
+			n = n * 16 + ch - '0';
+		} else {
+			return -EINVAL;
+		}
+	}
+	if (j < RTE_ETHER_ADDR_LEN)
+		addr_bytes[j++] = n;
+
+	if (j != RTE_ETHER_ADDR_LEN)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, const uint8_t addr_bytes[RTE_ETHER_ADDR_LEN])
+{
+	int i, ret;
+	uint8_t mask_bytes[RTE_ETHER_ADDR_LEN] = { 0 };
+
+	ret = cpfl_str2mac(mask, mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to uint8_t[] failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes[i] != addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* inet_pton() returns 1 on success, 0 or -1 on failure */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret != 1)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst; see Field Mapping for details
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src.addr_bytes) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst.addr_bytes) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->src))
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->dst))
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			/* match: rte_flow_item->mask */
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_tcp fields */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_udp fields */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !icmp_mask)
+		return -EINVAL;
+	if (field_size == 0 && icmp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !icmp_mask)
+		return 0;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	/* match: struct rte_flow_attr(ingress,egress) */
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+static int
+cpfl_parse_pattern_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+			 const struct rte_flow_attr *attr,
+			 struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(pattern->actions, pattern->actions_size, items, attr,
+					    pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int ret;
+
+	/* Pattern Rules */
+	ret = cpfl_parse_pattern_rules(parser, items, attr, pr_action);
+	return ret;
+}
+
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_mr_key_action *mr_key_action */
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		/* match: <type> action matches RTE_FLOW_ACTION_TYPE_<type> */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummpy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		addr = NULL;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long%s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		}
+		/* else TODO: more type... */
+
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (layout) {
+			int ret;
+
+			memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+			ret = cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+						mr_action->mod.data, &mr_action->mod.byte_len);
+			if (ret < 0)
+				return -EINVAL;
+		}
+		return 0;
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i, size;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	size = parser->mr_size;
+
+	for (i = 0; i < size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		ret = cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+		if (!ret)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..9f118aaf71
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <json-c/json.h>
+#include <rte_flow.h>
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		uint16_t immediate;
+		struct {
+			uint16_t layer;
+			enum rte_flow_item_type header;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse
+ * rte_flow protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the 'protocols'
+ * field of its data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	uint16_t offset;
+	uint16_t size;
+};
+
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that helps the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow
+ * modification actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
+#endif
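
The entry points declared above are intended to be driven in this order by a
flow engine. The following is only an illustrative sketch, not part of the
patch: the caller, the JSON file path and the error handling are assumptions.

/* Illustrative sketch (not part of the patch): expected call order of the
 * parser API declared in cpfl_flow_parser.h. The JSON path is an assumption.
 */
static int
cpfl_parser_usage_sketch(const struct rte_flow_item *items,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action *actions)
{
	struct cpfl_flow_js_parser *parser = NULL;
	struct cpfl_flow_pr_action pr_action = {0};
	struct cpfl_flow_mr_action mr_action = {0};
	int ret;

	ret = cpfl_parser_create(&parser, "/path/to/cpfl_flow_parser.json");
	if (ret < 0)
		return ret;
	/* Select a pattern rule and fill the SEM profile / field vector. */
	ret = cpfl_flow_parse_items(parser, items, attr, &pr_action);
	if (ret < 0)
		goto out;
	/* Map modification actions (VXLAN encap/decap) to a MOD profile. */
	ret = cpfl_flow_parse_actions(parser, actions, &mr_action);
out:
	cpfl_parser_destroy(parser);
	return ret;
}
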
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 0be25512c3..7b8d043011 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,9 @@ endif
 
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
+    sources += files(
+        'cpfl_flow_parser.c',
+    )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
 endif
\ No newline at end of file
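
Since cpfl_flow_parser.c is only compiled when the json-c dependency is found,
callers are expected to be guarded by the CPFL_FLOW_JSON_SUPPORT define set
above. A minimal sketch, assuming a hypothetical init helper that is not part
of this patch:

/* Minimal sketch: fall back gracefully when json-c (and therefore the JSON
 * flow parser) is not compiled in. The helper name is an assumption.
 */
static int
cpfl_flow_parser_init_sketch(struct cpfl_flow_js_parser **parser,
			     const char *flow_parser_path)
{
#ifdef CPFL_FLOW_JSON_SUPPORT
	return cpfl_parser_create(parser, flow_parser_path);
#else
	RTE_SET_USED(parser);
	RTE_SET_USED(flow_parser_path);
	PMD_DRV_LOG(WARNING, "json-c not found, rte_flow JSON parser disabled");
	return -ENOTSUP;
#endif
}
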
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 3/8] net/cpfl: add FXP low level implementation
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 2/8] net/cpfl: add flow json parser Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 4/8] net/cpfl: setup ctrl path Yuying Zhang
                     ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add FXP low-level implementation for CPFL rte_flow to
create and delete rules.
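
For illustration only (not part of this patch), the encoding helpers added in
cpfl_actions.h below can be combined to build a single 32-bit action word; the
SET_VSI semantics and the VSI id used here are assumptions based on the macro
names:

/* Illustrative sketch: encode a 16-bit SET_VSI action word with the helpers
 * from cpfl_actions.h. Precedence and VSI id values are assumptions.
 */
static union cpfl_action_set
cpfl_example_fwd_vsi_action(uint16_t vsi_id)
{
	return (union cpfl_action_set) {
		.data = CPFL_ACT_MAKE_16B(CPFL_ACT_PREC_MAX,
					  CPFL_ACT_16B_INDEX_SET_VSI,
					  CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI,
								    CPFL_PE_LAN,
								    vsi_id)),
	};
}
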

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/meson.build     |   2 +
 6 files changed, 1722 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only INDEX 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base actions
+ * once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update existing
+ * metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two
+ * consecutive action sets.  The chained (auxiliary) action set comes first;
+ * the base/parent action set is the second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
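[Editor's note, not part of the patch: a minimal usage sketch of the action
factory helpers above.  The function name example_build_fwd_actions and the
slot, precedence, VSI and queue numbers are made-up example values; whether a
precedence of 5 passes CPFL_ACT_PREC_CHECK() is an assumption.]

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include "cpfl_actions.h"

/* Build a small action set: forward to VSI 1 on the LAN protocol engine and
 * steer traffic to queue 128, padding the unused slot with a NOP.
 */
static void
example_build_fwd_actions(uint8_t *act_bytes, uint8_t *act_byte_len)
{
	union cpfl_action_set acts[3];

	acts[0] = cpfl_act_fwd_vsi(0, 5, CPFL_PE_LAN, 1);	/* slot 0, prec 5 */
	acts[1] = cpfl_act_set_hash_queue(5, CPFL_PE_LAN, 128, false);
	acts[2] = cpfl_act_nop();				/* pad unused slot */

	memcpy(act_bytes, acts, sizeof(acts));
	*act_byte_len = sizeof(acts);
}

The resulting byte array could then be passed as the action bytes to
cpfl_prep_sem_rule_blob() introduced later in this patch; chained variants
such as cpfl_act_set_md32_ext() instead emit two consecutive action sets (the
auxiliary set first, the base set second), as described in the comment above.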
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly or is inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block is broken down into multiple
+		 * smaller blocks that are actually programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send and receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq_out parameter will be allocated/initialized and passed back to the
+ * caller if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shut down the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
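[Editor's note, not part of the patch: a rough sketch of how a config TX queue
might be registered through this API, assuming the CP has already provided DMA
memory for the descriptor ring.  The helper name example_add_cfg_txq and the
queue id are hypothetical; the register fields would normally be filled from
the control vport's queue-tail chunk info, as the later ethdev patch does.]

#include <string.h>
#include "cpfl_controlq.h"

static int
example_add_cfg_txq(struct idpf_hw *hw, struct idpf_dma_mem *ring_mem,
		    struct idpf_ctlq_info **cq_out)
{
	struct cpfl_ctlq_create_info qinfo;

	memset(&qinfo, 0, sizeof(qinfo));
	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_TX;
	qinfo.id = 0;				/* absolute queue id (example) */
	qinfo.len = CPFL_CFGQ_RING_LEN;
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	/* ring_mem->size must equal qinfo.len * sizeof(struct idpf_ctlq_desc),
	 * otherwise cpfl_check_dma_mem_parameters() rejects the queue.
	 */
	qinfo.ring_mem = *ring_mem;
	/* qinfo.reg.tail would be set from the vport's tx_qtail chunk info */

	return cpfl_vport_ctlq_add(hw, &qinfo, cq_out);
}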
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - prepare the common context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - prepare the context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
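[Editor's note, not part of the patch: a hedged sketch of how the helpers above
could be chained to prepare a SEM add-rule control queue message.  The function
name example_prep_sem_add_rule is hypothetical, the profile id, VSI id, cookie
and the other scalar arguments are placeholder values, and the payload DMA
buffer is assumed to be large enough to hold the rule blob.]

#include <string.h>
#include "cpfl_rules.h"

static void
example_prep_sem_add_rule(struct idpf_dma_mem *payload,
			  const uint8_t *key, uint8_t key_len,
			  const uint8_t *acts, uint8_t act_len,
			  struct idpf_ctlq_msg *msg)
{
	union cpfl_rule_cfg_pkt_record *blob =
		(union cpfl_rule_cfg_pkt_record *)payload->va;
	struct cpfl_rule_cfg_data cfg;
	uint16_t cfg_ctrl;

	/* profile 1, sub-profile 0, no cache pinning, no fixed fetch */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(1, 0, 0, 0);
	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, blob);

	memset(&cfg, 0, sizeof(cfg));
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule,
				       0x1234,	/* cookie (example) */
				       1,	/* vsi_id (example) */
				       0, 0,	/* port_num, host_id */
				       0, 0,	/* time_sel, time_sel_val */
				       0, 1,	/* cache_wr_thru, resp_req */
				       sizeof(*blob), payload,
				       &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);
}

The resulting idpf_ctlq_msg would then be posted on a config TX queue set up
through cpfl_controlq.c; the descriptor build itself does not touch hardware.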
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 7b8d043011..9a8d25ffae 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_representor.c',
         'cpfl_vchnl.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
@@ -43,6 +44,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow_parser.c',
+        'cpfl_rules.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 4/8] net/cpfl: setup ctrl path
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (2 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 3/8] net/cpfl: add FXP low level implementation Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 5/8] net/cpfl: set up rte flow skeleton Yuying Zhang
                     ` (4 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao, Yuying Zhang

From: Wenjing Qiao <wenjing.qiao@intel.com>

Set up the control vport and control queues for flow offloading.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 267 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  14 ++
 drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
 3 files changed, 425 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3c4a6a4724..22f3e72894 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1852,6 +1856,260 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_remove_cfgqs(adapter);
+	cpfl_stop_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_whitelist_uninit(adapter);
@@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index ed730cc0e9..90e71a6550 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -89,6 +90,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -189,10 +194,19 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vport_identity,
 			   struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 5/8] net/cpfl: set up rte flow skeleton
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (3 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 4/8] net/cpfl: setup ctrl path Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 6/8] net/cpfl: add fxp rule module Yuying Zhang
                     ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Yuying Zhang

Set up the rte_flow backend skeleton. Introduce a framework that
supports different engines as the rte_flow backend, and bridge the
rte_flow driver API to those flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  54 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   3 +-
 5 files changed, 485 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
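
For context, an engine plugs into this skeleton by filling a struct
cpfl_flow_engine and registering it at load time with
cpfl_flow_engine_register(). The sketch below is not part of this patch;
it shows a no-op engine only, and the real fxp engine added later in
this series follows the same pattern with its own callbacks.

/* Minimal sketch (illustrative only): a no-op engine registering itself. */
#include "cpfl_flow.h"

static int
cpfl_dummy_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static void
cpfl_dummy_engine_uninit(struct cpfl_adapter_ext *ad __rte_unused)
{
}

static struct cpfl_flow_engine cpfl_dummy_engine = {
	.type   = CPFL_FLOW_ENGINE_FXP, /* only non-NONE type defined so far */
	.init   = cpfl_dummy_engine_init,
	.uninit = cpfl_dummy_engine_uninit,
	/* .create, .destroy, .query_count, .free and .parse_pattern_action
	 * would point at the engine's real handlers.
	 */
};

/* The constructor runs at load time, so the engine is already on the
 * list when cpfl_flow_engine_init() walks it during adapter init.
 */
RTE_INIT(cpfl_dummy_engine_register)
{
	cpfl_flow_engine_register(&cpfl_dummy_engine);
}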

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 22f3e72894..618a6a0fe2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vports support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2283,6 +2322,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_create_ctrl_vport;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2290,6 +2336,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+	cpfl_ctrl_path_close(adapter);
+#endif
 err_create_ctrl_vport:
 	rte_free(adapter->vports);
 err_vports_alloc:
@@ -2446,6 +2496,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2526,6 +2577,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 90e71a6550..40a27f8b74 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -143,9 +143,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -195,6 +198,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
 	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only priority 0-7 is supported.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 9a8d25ffae..4951ea1c4a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,9 +43,10 @@ endif
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
+        'cpfl_flow.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-endif
\ No newline at end of file
+endif
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 6/8] net/cpfl: add fxp rule module
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (4 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 5/8] net/cpfl: set up rte flow skeleton Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 7/8] net/cpfl: add fxp flow engine Yuying Zhang
                     ` (2 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Yuying Zhang

Add a low-level fxp module for rule packing, creation and destruction.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 297 ++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++
 drivers/net/cpfl/meson.build     |   1 +
 7 files changed, 851 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
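
For reference, the intended flow through this module is: pack each rule
into a DMA blob plus a control queue descriptor, send the batch on the
tx config queue, then poll the rx config queue for the completions. A
minimal caller is sketched below; it is illustrative only, the field
values are made up, and in the real driver they come from the parsed
rte_flow rule.

/* Sketch only: driving the fxp rule module for one SEM (exact-match) rule. */
#include "cpfl_ethdev.h"
#include "cpfl_fxp_rule.h"

static int
example_add_one_sem_rule(struct cpfl_itf *itf, struct cpfl_adapter_ext *ad)
{
	struct cpfl_rule_info rinfo = {0};

	rinfo.type = CPFL_RULE_TYPE_SEM;
	rinfo.cookie = 0x1000;		/* unique rule cookie, illustrative */
	rinfo.host_id = 0;
	rinfo.port_num = 0;
	rinfo.resp_req = 2;		/* ask hardware for a completion */
	rinfo.sem.prof_id = 1;		/* profile id, illustrative */
	/* sem.key/key_byte_len and act_bytes/act_byte_len are filled from
	 * the parsed pattern and actions in the real flow engine.
	 */

	/* Each vport owns one tx/rx config queue pair: the even index is
	 * the tx queue, the odd index is the rx queue.
	 */
	return cpfl_rule_process(itf, ad->ctlqp[0], ad->ctlqp[1],
				 &rinfo, 1, true /* add */);
}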

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 476c78f235..ed76282b0c 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..740ae6522c 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 618a6a0fe2..08a55f0352 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2462,6 +2464,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2511,6 +2533,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 40a27f8b74..c29da92e81 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -145,10 +145,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -222,6 +226,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..f87ccc9f77
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+	int ret = 0;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 4951ea1c4a..f5d92a019e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if js_dep.found()
         'cpfl_flow.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
+        'cpfl_fxp_rule.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 7/8] net/cpfl: add fxp flow engine
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (5 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 6/8] net/cpfl: add fxp rule module Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-01 11:31   ` [PATCH v2 8/8] net/cpfl: add flow support for representor Yuying Zhang
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Yuying Zhang

Adapt the low-level fxp module to work as a flow engine.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  81 ++++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 665 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
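
Once this engine is registered, flows created through the standard
rte_flow API are dispatched to it. An application-side example is
sketched below; it is illustrative only, since the patterns and actions
actually accepted depend on the json profile loaded via the flow_parser
devarg.

/* Sketch: create a flow on a cpfl vport that matches ETH/IPV4 and
 * steers the matching packets to rx queue 1.
 */
#include <rte_flow.h>

static struct rte_flow *
example_create_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}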

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index c29da92e81..0dbf8fb21b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -92,6 +92,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX       0
+#define CPFL_FPCP_CFGQ_RX       1
 #define CPFL_CFGQ_NUM		8
 
 struct cpfl_vport_param {
@@ -203,6 +205,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_whitelist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
@@ -239,5 +243,82 @@ int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem
 	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
+#define CPFL_INVALID_HW_ID      UINT16_MAX
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+	adapter = itf->adapter;
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport_info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
+		vport_identity.pf_id = ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity, (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id does not exist");
+			goto err;
+		}
+
+		return info->vport_info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e0c08a77c3
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information from the JSON file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * Even index is tx queue and odd index is rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5d92a019e..1e86d7ee15 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -44,6 +44,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow.c',
+        'cpfl_flow_engine_fxp.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
 	    'cpfl_fxp_rule.c',
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v2 8/8] net/cpfl: add flow support for representor
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (6 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 7/8] net/cpfl: add fxp flow engine Yuying Zhang
@ 2023-09-01 11:31   ` Yuying Zhang
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
  8 siblings, 0 replies; 128+ messages in thread
From: Yuying Zhang @ 2023-09-01 11:31 UTC (permalink / raw)
  To: dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Yuying Zhang

Add flow support for the representor, so that a representor can
create, destroy, validate and flush rules.
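
A representor does not own a vport, so the patch below picks its control
queue pair by hashing the representor's pf/vf id into the shared config
queue pool (CPFL_TX_CFGQ_NUM pairs). A small sketch of that mapping, with
hypothetical names and values, for illustration only:

#include <stdint.h>

#define EX_TX_CFGQ_NUM 4	/* mirrors CPFL_TX_CFGQ_NUM in cpfl_ethdev.h */

/* Illustration only: maps a representor to a tx control queue index;
 * the paired rx queue is the next (odd) index.
 */
static inline uint16_t
example_repr_cpq_id(uint16_t pf_id, uint16_t vf_id)
{
	/* e.g. pf 1 / vf 2 -> (3 & 3) * 2 = 6: tx ctlq 6, rx ctlq 7 */
	return ((pf_id + vf_id) & (EX_TX_CFGQ_NUM - 1)) * 2;
}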

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 13 +++++++++++
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 24 ++++++++++++++++++--
 drivers/net/cpfl/cpfl_representor.c     | 29 +++++++++++++++++++++++++
 4 files changed, 65 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 7032dd1a1a..e2fe5430ed 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -196,3 +196,16 @@ low level hardware resources defined in a DDP package file.
    .. code-block:: console
 
    dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+
+#. Create a flow to forward ETH-IPV4-TCP packets from the I/O port to a local (CPF's) vport::
+
+   .. code-block:: console
+
+   flow create 0 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id 0 / end
+
+#. Send the packet, and it should be displayed by the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="enp24s0f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 688bee4d6d..eded3ecc84 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -58,6 +58,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index e0c08a77c3..f40402a912 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 83069d0830..6e7d3fd0a6 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -325,6 +327,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -336,6 +354,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= idpf_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -344,6 +363,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -353,6 +373,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 0/9] add rte flow support for cpfl
  2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
                     ` (7 preceding siblings ...)
  2023-09-01 11:31   ` [PATCH v2 8/8] net/cpfl: add flow support for representor Yuying Zhang
@ 2023-09-06  9:33   ` Wenjing Qiao
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
                       ` (10 more replies)
  8 siblings, 11 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:33 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, Wenjing Qiao

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230816150541.144532-1-beilei.xing@intel.com/

Wenjing Qiao (4):
  net/cpfl: parse flow parser file in devargs
  net/cpfl: add flow json parser
  net/cpfl: add FXP low level implementation
  net/cpfl: setup ctrl path

Yuying Zhang (5):
  net/cpfl: set up rte flow skeleton
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
  app/test-pmd: refine encap content

 app/test-pmd/cmdline_flow.c             |   12 +-
 doc/guides/nics/cpfl.rst                |   45 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 ++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  803 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  390 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  125 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 ++++
 drivers/net/cpfl/cpfl_flow.h            |   85 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1910 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  236 +++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  297 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   14 +
 20 files changed, 6527 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
@ 2023-09-06  9:33     ` Wenjing Qiao
  2023-09-11  0:48       ` Wu, Jingjing
  2023-09-06  9:34     ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
                       ` (8 subsequent siblings)
  10 siblings, 1 reply; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:33 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, Wenjing Qiao

Add the devargs "flow_parser" for the rte_flow JSON parser.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst       | 32 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.c | 38 +++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  3 +++
 drivers/net/cpfl/meson.build   |  6 ++++++
 4 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index c20334230b..7032dd1a1a 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,24 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The PMD supports using a JSON file to parse rte_flow tokens into low-level hardware
+  resources defined in a DDP package file.
+
+  The user can specify the path of the JSON file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the JSON file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The rte_flow feature requires the json-c library to be installed.
 
 Features
 --------
@@ -164,3 +176,23 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+Rte_flow uses a JSON file to direct the CPF PMD to parse rte_flow tokens into
+low-level hardware resources defined in a DDP package file.
+
+#. Install the json-c library::
+
+   .. code-block:: console
+
+   git clone https://github.com/json-c/json-c.git
+   cd json-c
+   git checkout 777dd06be83ef7fac71c2218b565557cd068a714
+
+#. Run testpmd with the JSON file::
+
+   .. code-block:: console
+
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88c1479f3a..3c4a6a4724 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a891bd8df9..ed730cc0e9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -87,6 +87,8 @@
 #define ACC_LCE_ID	15
 #define IMC_MBX_EFD_ID	0
 
+#define CPFL_FLOW_FILE_LEN 100
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -100,6 +102,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index fb075c6860..0be25512c3 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,9 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+    ext_deps += js_dep
+endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 2/9] net/cpfl: add flow json parser
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
  2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
  2023-09-06  9:33     ` [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-08  6:26       ` Liu, Mingxia
  2023-09-11  6:24       ` Wu, Jingjing
  2023-09-06  9:34     ` [PATCH v3 3/9] net/cpfl: add FXP low level implementation Wenjing Qiao
                       ` (7 subsequent siblings)
  10 siblings, 2 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, Wenjing Qiao

A JSON file is used to direct the DPDK CPF PMD to parse
rte_flow tokens into low-level hardware resources defined
in a DDP package file.
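
The parser is built on the json-c API: the file is loaded once with
json_object_from_file(), objects and arrays are walked with
json_object_object_get()/json_object_array_get_idx(), and the tree is
released with json_object_put() once the internal cpfl_flow_js_*
structures are filled. A stripped-down sketch of that access pattern
(the function name and error handling are illustrative only, not part
of this patch):

#include <stdio.h>
#include <json-c/json.h>

static int
example_walk_patterns(const char *filename)
{
	json_object *root, *patterns;
	size_t i, len;

	root = json_object_from_file(filename);
	if (!root)
		return -1;

	/* top-level "patterns" array, as expected by cpfl_flow_js_pattern_rule() */
	patterns = json_object_object_get(root, "patterns");
	len = patterns ? json_object_array_length(patterns) : 0;
	for (i = 0; i < len; i++) {
		json_object *pr = json_object_array_get_idx(patterns, i);
		json_object *key = json_object_object_get(pr, "key");

		printf("pattern %zu has a key object: %s\n",
		       i, key ? "yes" : "no");
	}

	json_object_put(root);	/* drop the reference taken by _from_file() */
	return 0;
}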

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h      |   70 +
 drivers/net/cpfl/cpfl_flow_parser.c | 1910 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  236 ++++
 drivers/net/cpfl/meson.build        |    3 +
 4 files changed, 2219 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index ed730cc0e9..2151605987 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -169,6 +173,16 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -189,6 +203,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -215,4 +231,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+	adapter = itf->adapter;
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport_info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
+		vport_identity.pf_id = ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id does not exist");
+			goto err;
+		}
+
+		return info->vport_info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..59b627e99c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1910 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_object_get_string(subobject);
+}
+
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int64(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_object *cjson_pr_key_attr, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(cjson_pr_key_attr);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr_key_attr, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			return -EINVAL;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!cjson_field)
+		return 0;
+	len = json_object_array_length(cjson_field);
+	js_field->fields_size = len;
+	if (len == 0)
+		return 0;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name, *mask;
+
+		object = json_object_array_get_idx(cjson_field, i);
+		name = cpfl_json_object_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_object_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(cjson_pr_key_proto);
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_key_proto_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(cjson_pr_key_proto, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		cjson_pr_key_proto_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_object_to_string(cjson_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_object *cjson_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(cjson_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_fv);
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *cjson_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_fv, i);
+		js_fv = &js_act->sem.fv[i];
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		cjson_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(cjson_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(cjson_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(cjson_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *cjson_fv, *cjson_pr_action_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		cjson_pr_action_sem = json_object_object_get(cjson_per_act, "data");
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		cjson_fv = json_object_object_get(cjson_pr_action_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(cjson_pr_act);
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(cjson_pr_act, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_pr;
+	int i, len;
+
+	/* Pattern Rules */
+	cjson_pr = json_object_object_get(json_root, "patterns");
+	if (!cjson_pr) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(cjson_pr);
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_actions, *cjson_pr_key, *cjson_pr_key_proto,
+		    *cjson_pr_key_attr;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr, i);
+		/* pr->key */
+		cjson_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		cjson_pr_key_proto = json_object_object_get(cjson_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(cjson_pr_key_proto, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		cjson_pr_key_attr = json_object_object_get(cjson_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(cjson_pr_key_attr, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		cjson_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(cjson_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_mr_key);
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_mr_key_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(cjson_mr_key, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		cjson_mr_key_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *cjson_mr_key_proto;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			cjson_mr_key_proto = json_object_object_get(cjson_mr_key_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!cjson_mr_key_proto) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(cjson_mr_key_proto);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(cjson_mr_key_proto, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_object *cjson_layout, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_layout);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0, ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(cjson_layout, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *cjson_mr_action_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(cjson_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_object *layout;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(cjson_mr_action_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		layout = json_object_object_get(cjson_mr_action_data, "layout");
+		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_mr;
+	int i, len;
+
+	cjson_mr = json_object_object_get(json_root, "modifications");
+	if (!cjson_mr) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_object_array_length(cjson_mr);
+	parser->mr_size = len;
+	if (len == 0)
+		return 0;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *cjson_mr_key, *cjson_mr_action, *cjson_mr_key_action;
+
+		object = json_object_array_get_idx(cjson_mr, i);
+		/* mr->key */
+		cjson_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		cjson_mr_key_action = json_object_object_get(cjson_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		cjson_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+	ret = cpfl_flow_js_mod_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			return -EINVAL;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			return -EINVAL;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
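+/*
+ * Fill the SEM key buffer from a field vector array: each entry writes a
+ * 16-bit value at fv[2 * offset], sourced from an immediate value, a
+ * metadata chunk or a matched protocol header field.
+ */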
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)((temp_fv & 0xff00) >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group % 10 == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
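+/*
+ * Convert a colon-separated hexadecimal MAC string (e.g. "ff:ff:ff:ff:ff:ff")
+ * into RTE_ETHER_ADDR_LEN bytes.
+ */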
+static int
+cpfl_str2mac(const char *mask, uint8_t *addr_bytes)
+{
+	int i, size, j;
+	uint8_t n;
+
+	size = strlen(mask);
+	n = 0;
+	j = 0;
+	for (i = 0; i < size; i++) {
+		char ch = mask[i];
+
+		if (ch == ':') {
+			if (j >= RTE_ETHER_ADDR_LEN)
+				return -EINVAL;
+			addr_bytes[j++] = n;
+			n = 0;
+		} else if (ch >= 'a' && ch <= 'f') {
+			n = n * 16 + ch - 'a' + 10;
+		} else if (ch >= 'A' && ch <= 'F') {
+			n = n * 16 + ch - 'A' + 10;
+		} else if (ch >= '0' && ch <= '9') {
+			n = n * 16 + ch - '0';
+		} else {
+			return -EINVAL;
+		}
+	}
+	if (j < RTE_ETHER_ADDR_LEN)
+		addr_bytes[j++] = n;
+
+	if (j != RTE_ETHER_ADDR_LEN)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, const uint8_t addr_bytes[RTE_ETHER_ADDR_LEN])
+{
+	int i, ret;
+	uint8_t mask_bytes[RTE_ETHER_ADDR_LEN] = { 0 };
+
+	ret = cpfl_str2mac(mask, mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to uint8_t[] failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes[i] != addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* inet_pton() returns 1 on success, 0 if the string is not a valid
+	 * IPv4 address and -1 on error.
+	 */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret <= 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
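+/*
+ * Check the rte_flow ETH item mask against the JSON pattern key: every field
+ * listed in the JSON must match the item mask exactly, and any field that is
+ * not listed must be zero in the item mask.
+ */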
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst; see Field Mapping for details. */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src.addr_bytes) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst.addr_bytes) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->src))
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (!rte_is_zero_ether_addr(&eth_mask->dst))
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			/* match: rte_flow_item->mask */
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !icmp_mask)
+		return -EINVAL;
+	if (field_size == 0 && icmp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !icmp_mask)
+		return 0;
+
+	return 0;
+}
+
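+/*
+ * Walk the rte_flow item list and check each protocol entry of the JSON
+ * pattern key against the mask of the corresponding item; the item list must
+ * end exactly where the JSON protocol list ends.
+ */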
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	/* match: struct rte_flow_attr(ingress,egress) */
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+static int
+cpfl_parse_pattern_rules(struct cpfl_itf *itf,
+			 struct cpfl_flow_js_parser *parser,
+			 const struct rte_flow_item *items,
+			 const struct rte_flow_attr *attr,
+			 struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int ret;
+
+	/* Pattern Rules */
+	ret = cpfl_parse_pattern_rules(itf, parser, items, attr, pr_action);
+	return ret;
+}
+
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_mr_key_action *mr_key_action */
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		/* match: <type> action matches RTE_FLOW_ACTION_TYPE_<type> */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
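+/*
+ * Compose the MOD memory region: for each layout entry, copy 'size' bytes
+ * at 'offset' from the encap item spec selected by 'hint' into 'buffer' at
+ * the running position; entries with index == -1 only reserve space.
+ * For example, hint "eth" with size 14 copies the encap Ethernet header.
+ */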
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummpy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		addr = NULL;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long%s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		}
+		/* else TODO: more type... */
+
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (layout) {
+			int ret;
+
+			memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+			ret = cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+						mr_action->mod.data, &mr_action->mod.byte_len);
+			if (ret < 0)
+				return -EINVAL;
+		}
+		return 0;
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i, size;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	size = parser->mr_size;
+
+	for (i = 0; i < size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		ret = cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+		if (!ret)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
+void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset],
+		   &data,
+		   sizeof(uint16_t));
+}
+
+void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset],
+		   &data,
+		   sizeof(uint32_t));
+}
+
+uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint32_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
+		return false;
+	}
+	dev_id = dev_id << 3;
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint32_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	dev_id = dev_id << 1;
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint32_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+bool
+cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint32_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..ab755e3359
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <json-c/json.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		uint16_t immediate;
+		struct {
+			uint16_t layer;
+			enum rte_flow_item_type header;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse
+ * rte_flow protocol headers. Each rule is described by a key object and an
+ * action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the 'protocols'
+ * field of its data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	uint16_t offset;
+	uint16_t size;
+};
+
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that help the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
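+/*
+ * Illustrative JSON fragment for a "mod" action (key names follow the parser
+ * in cpfl_flow_parser.c; the values and the exact "index"/"size" spellings
+ * are placeholders for illustration):
+ *
+ *   "action": {
+ *       "type": "mod",
+ *       "data": {
+ *           "profile": 1,
+ *           "layout": [
+ *               { "index": 0, "size": 16, "offset": 0, "hint": "eth" }
+ *           ]
+ *       }
+ *   }
+ */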
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow
+ * modification actions. Each rule is described by a key/action pair.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
+void cpfl_metadata_init(struct cpfl_metadata *meta);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+void cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data);
+void cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data);
+uint16_t cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset);
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 0be25512c3..7b8d043011 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,9 @@ endif
 
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
+    sources += files(
+        'cpfl_flow_parser.c',
+    )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
 endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 3/9] net/cpfl: add FXP low level implementation
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (2 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-06  9:34     ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
                       ` (6 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, Wenjing Qiao

Add the FXP low-level implementation used by CPFL rte_flow to
create and delete rules.
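
A minimal usage sketch of the action helpers added in cpfl_actions.h
(illustrative only; the precedence, VSI and queue values are placeholders):

    union cpfl_action_set acts[3];

    /* forward to VSI 10 through the LAN protocol engine, slot 0, precedence 1 */
    acts[0] = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, 10);
    /* steer to queue 128, no implicit VSI, precedence 2 */
    acts[1] = cpfl_act_set_hash_queue(2, CPFL_PE_LAN, 128, true);
    /* pad the remaining slot with a NOP */
    acts[2] = cpfl_act_nop();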

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/meson.build     |   2 +
 6 files changed, 1722 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base actions
+ * when the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update
+ * existing metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of 2 chained
+ * action sets.  The chained action set is the first.  The base/parent action
+ * set is the second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
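
For reference, a minimal usage sketch of the factory helpers above (illustrative
only, not part of this patch; the precedence, profile, metadata id and VSI
values are placeholders):

static void
example_build_action_set(union cpfl_action_set *acts, enum cpfl_prot_eng pe,
			 uint16_t vsi)
{
	struct cpfl_action_set_ext md32;

	/* forward to a single VSI, SET_VSI slot 0, precedence 1 */
	acts[0] = cpfl_act_fwd_vsi(0, 1, pe, vsi);
	/* apply MOD_META profile 0 */
	acts[1] = cpfl_act_mod_meta(1, 0);
	/* a 32-bit SET_MD expands into two chained action sets */
	cpfl_act_set_md32_ext(&md32, 0, 1, 0, 0, 0x1234);
	acts[2] = md32.acts[0];
	acts[3] = md32.acts[1];
}

Each helper falls back to cpfl_act_nop() when a parameter is out of range, so
an invalid input never produces a malformed action word.
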
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block is broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send and receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
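
For reference, a minimal sketch of how a caller drives this API (illustrative
only, not part of this patch; the queue id is a placeholder and the DMA ring is
assumed to have been allocated beforehand with len * sizeof(struct
idpf_ctlq_desc) bytes):

static int
example_add_tx_cfgq(struct idpf_hw *hw, struct idpf_dma_mem *ring,
		    struct idpf_ctlq_info **cq)
{
	struct cpfl_ctlq_create_info qinfo = { 0 };

	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_TX;
	qinfo.id = 0;		/* placeholder queue id */
	qinfo.len = CPFL_CFGQ_RING_LEN;
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	qinfo.ring_mem = *ring;
	/* qinfo.reg.* is filled from the vport chunk info in real usage;
	 * Rx config queues additionally need qinfo.buf_mem.
	 */

	/* DMA sizes are validated via cpfl_check_dma_mem_parameters() */
	return cpfl_vport_ctlq_add(hw, &qinfo, cq);
}
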
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get common bit context for descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get bit context for descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format, the rest of the
+ * members are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
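
For reference, a rough sketch of the intended call order when building a SEM
add-rule message (illustrative only, not part of this patch; the cookie, ids
and the DMA payload are placeholders):

static void
example_prep_sem_add(struct idpf_dma_mem *payload, struct idpf_ctlq_msg *msg,
		     const uint8_t *key, uint8_t key_len,
		     const uint8_t *acts, uint8_t act_len)
{
	union cpfl_rule_cfg_pkt_record *blob = payload->va;
	struct cpfl_rule_cfg_data cfg = { 0 };
	uint16_t cfg_ctrl;

	/* 1. common descriptor fields (vsi/port/host ids are placeholders) */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule,
				       0x1234 /* cookie */, 0 /* vsi */,
				       0 /* port */, 0 /* host */,
				       0 /* time_sel */, 0 /* time_sel_val */,
				       1 /* cache_wr_thru */, 1 /* resp_req */,
				       sizeof(*blob), payload, &cfg.common);

	/* 2. rule packet blob carried in the DMA payload */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(1 /* prof */, 0, 0, 0);
	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, blob);

	/* 3. descriptor context written into the control queue message */
	cpfl_prep_rule_desc(&cfg, msg);
}

A later patch in this series wires messages prepared this way into the config
queue send path.
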
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 7b8d043011..9a8d25ffae 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_representor.c',
         'cpfl_vchnl.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
@@ -43,6 +44,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow_parser.c',
+        'cpfl_rules.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 4/9] net/cpfl: setup ctrl path
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (3 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 3/9] net/cpfl: add FXP low level implementation Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-11  6:30       ` Liu, Mingxia
  2023-09-11  6:36       ` Wu, Jingjing
  2023-09-06  9:34     ` [PATCH v3 5/9] net/cpfl: set up rte flow skeleton Wenjing Qiao
                       ` (5 subsequent siblings)
  10 siblings, 2 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, Wenjing Qiao

Setup the control vport and control queue for flow offloading.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 267 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_ethdev.h |  14 ++
 drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
 3 files changed, 425 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3c4a6a4724..22f3e72894 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1852,6 +1856,260 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_remove_cfgqs(adapter);
+	cpfl_stop_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_whitelist_uninit(adapter);
@@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2151605987..40bba8da00 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -89,6 +90,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 #define CPFL_INVALID_HW_ID	UINT16_MAX
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
@@ -204,11 +209,20 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
 
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
+
 	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vport_identity,
 			   struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
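
Both helpers above assume the interleaved layout created by cpfl_cfgq_setup():
even slots of cfgq_info[]/ctlqp[] hold Tx config queues and odd slots hold Rx
config queues. Expressed as illustrative macros (not part of this patch):

#define CPFL_TX_CFGQ_IDX(i)	(2 * (i))	/* i-th Tx config queue */
#define CPFL_RX_CFGQ_IDX(i)	(2 * (i) + 1)	/* i-th Rx config queue */
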
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 5/9] net/cpfl: set up rte flow skeleton
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (4 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-06  9:34     ` [PATCH v3 6/9] net/cpfl: add fxp rule module Wenjing Qiao
                       ` (4 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  54 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   3 +-
 5 files changed, 485 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 22f3e72894..618a6a0fe2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2283,6 +2322,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_create_ctrl_vport;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2290,6 +2336,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+	cpfl_ctrl_path_close(adapter);
+#endif
 err_create_ctrl_vport:
 	rte_free(adapter->vports);
 err_vports_alloc:
@@ -2446,6 +2496,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2526,6 +2577,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 40bba8da00..be625284a4 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,9 +147,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -209,6 +212,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
 	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 9a8d25ffae..4951ea1c4a 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,9 +43,10 @@ endif
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
+        'cpfl_flow.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-endif
\ No newline at end of file
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 6/9] net/cpfl: add fxp rule module
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (5 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 5/9] net/cpfl: set up rte flow skeleton Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-12  7:40       ` FW: " Liu, Mingxia
  2023-09-06  9:34     ` [PATCH v3 7/9] net/cpfl: add fxp flow engine Wenjing Qiao
                       ` (3 subsequent siblings)
  10 siblings, 1 reply; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add a low-level FXP module for rule packing, creation and destruction.
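
A usage sketch for the new API (hypothetical helper and example values; a
real caller relies on the per-interface DMA and message buffers set up in
cpfl_dev_vport_init(), which cpfl_rule_process() uses internally):

    #include <stdbool.h>
    #include "cpfl_ethdev.h"
    #include "cpfl_fxp_rule.h"

    /* Pack and program one SEM rule through a tx/rx control queue pair. */
    static int
    example_add_sem_rule(struct cpfl_itf *itf,
                         struct idpf_ctlq_info *tx_cq,
                         struct idpf_ctlq_info *rx_cq)
    {
            struct cpfl_rule_info rinfo = {0};

            rinfo.type = CPFL_RULE_TYPE_SEM;
            rinfo.cookie = 0x1000;          /* arbitrary example cookie */
            rinfo.vsi = 0;                  /* destination vsi id */
            rinfo.sem.prof_id = 1;          /* profile chosen by the parser */
            rinfo.sem.key_byte_len = 16;
            /* rinfo.sem.key[] and rinfo.act_bytes[]/act_byte_len must be
             * filled with the packed key and action set before this call.
             */

            return cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
    }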

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 297 ++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++
 drivers/net/cpfl/meson.build     |   1 +
 7 files changed, 851 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 476c78f235..ed76282b0c 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..740ae6522c 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 618a6a0fe2..08a55f0352 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2462,6 +2464,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2511,6 +2533,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index be625284a4..6b02573b4a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -149,10 +149,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -238,6 +242,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..f87ccc9f77
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+	int ret = 0;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 4951ea1c4a..f5d92a019e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if js_dep.found()
         'cpfl_flow.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
+        'cpfl_fxp_rule.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 7/9] net/cpfl: add fxp flow engine
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (6 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 6/9] net/cpfl: add fxp rule module Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-06  9:34     ` [PATCH v3 8/9] net/cpfl: add flow support for representor Wenjing Qiao
                       ` (2 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt the low-level FXP module as a flow engine.
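
For context, an example of the kind of rule this engine is meant to back,
in testpmd syntax (illustrative only; the patterns actually accepted depend
on the JSON configuration loaded through the flow_parser devarg):

    flow create 0 ingress priority 1 pattern eth / ipv4 / udp / end \
         actions queue index 2 / end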

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 611 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 6b02573b4a..e30fa0ed82 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -92,6 +92,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 #define CPFL_INVALID_HW_ID	UINT16_MAX
@@ -217,6 +219,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_whitelist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
@@ -310,4 +314,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e0c08a77c3
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parse detailed rule information with json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represent a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5d92a019e..1e86d7ee15 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -44,6 +44,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow.c',
+	'cpfl_flow_engine_fxp.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
 	    'cpfl_fxp_rule.c',
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 8/9] net/cpfl: add flow support for representor
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (7 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 7/9] net/cpfl: add fxp flow engine Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-06  9:34     ` [PATCH v3 9/9] app/test-pmd: refine encap content Wenjing Qiao
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
  10 siblings, 0 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for the representor, so that a representor can
create, destroy, validate and flush rules.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 13 ++++
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 90 ++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 7032dd1a1a..e2fe5430ed 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -196,3 +196,16 @@ low level hardware resources defined in a DDP package file.
    .. code-block:: console
 
    dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from the I/O port to a local (CPF's) vport::
+
+   .. code-block:: console
+
+   flow create 0 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id 0 / end
+
+#. Send the packet, and it should be displayed by the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="enp24s0f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 688bee4d6d..eded3ecc84 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -58,6 +58,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index e0c08a77c3..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
@@ -414,6 +434,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,7 +508,13 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
-	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
 		return -EINVAL;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 83069d0830..6e7d3fd0a6 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -325,6 +327,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -336,6 +354,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= idpf_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -344,6 +363,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -353,6 +373,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport_info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v3 9/9] app/test-pmd: refine encap content
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (8 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 8/9] net/cpfl: add flow support for representor Wenjing Qiao
@ 2023-09-06  9:34     ` Wenjing Qiao
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
  10 siblings, 0 replies; 128+ messages in thread
From: Wenjing Qiao @ 2023-09-06  9:34 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing
  Cc: mingxia.liu, stable

From: Yuying Zhang <yuying.zhang@intel.com>

Refine vxlan encap content of all protocol headers.

Fixes: 1960be7d32f8 ("app/testpmd: add VXLAN encap/decap")
Cc: stable@dpdk.org

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 app/test-pmd/cmdline_flow.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 94827bcc4a..b6cc0d9620 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -8514,7 +8514,7 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 				.type = RTE_FLOW_ITEM_TYPE_END,
 			},
 		},
-		.item_eth.hdr.ether_type = 0,
+		.item_eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
 		.item_vlan = {
 			.hdr.vlan_tci = vxlan_encap_conf.vlan_tci,
 			.hdr.eth_proto = 0,
@@ -8522,24 +8522,32 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 		.item_ipv4.hdr = {
 			.src_addr = vxlan_encap_conf.ipv4_src,
 			.dst_addr = vxlan_encap_conf.ipv4_dst,
+			.version_ihl = RTE_IPV4_VHL_DEF,
+			.next_proto_id = IPPROTO_UDP,
+			.time_to_live = IPDEFTTL,
+			.hdr_checksum = rte_cpu_to_be_16(1),
 		},
 		.item_udp.hdr = {
 			.src_port = vxlan_encap_conf.udp_src,
 			.dst_port = vxlan_encap_conf.udp_dst,
+			.dgram_cksum = RTE_BE16(0x01),
 		},
-		.item_vxlan.hdr.flags = 0,
+		.item_vxlan.hdr.flags = 0x08,
 	};
 	memcpy(action_vxlan_encap_data->item_eth.hdr.dst_addr.addr_bytes,
 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(action_vxlan_encap_data->item_eth.hdr.src_addr.addr_bytes,
 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
 	if (!vxlan_encap_conf.select_ipv4) {
+		action_vxlan_encap_data->item_eth.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
 		       &vxlan_encap_conf.ipv6_src,
 		       sizeof(vxlan_encap_conf.ipv6_src));
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
 		       &vxlan_encap_conf.ipv6_dst,
 		       sizeof(vxlan_encap_conf.ipv6_dst));
+		action_vxlan_encap_data->item_ipv6.hdr.proto = IPPROTO_UDP;
+		action_vxlan_encap_data->item_ipv6.hdr.hop_limits = IPDEFTTL;
 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
 			.spec = &action_vxlan_encap_data->item_ipv6,
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v3 2/9] net/cpfl: add flow json parser
  2023-09-06  9:34     ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
@ 2023-09-08  6:26       ` Liu, Mingxia
  2023-09-11  6:24       ` Wu, Jingjing
  1 sibling, 0 replies; 128+ messages in thread
From: Liu, Mingxia @ 2023-09-08  6:26 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing,
	Xing, Beilei



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v3 2/9] net/cpfl: add flow json parser
> 
> A JSON file will be used to direct DPDK CPF PMD to
> parse rte_flow tokens into low level hardware resources
> defined in a DDP package file.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h      |   70 +
>  drivers/net/cpfl/cpfl_flow_parser.c | 1910 +++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow_parser.h |  236 ++++
>  drivers/net/cpfl/meson.build        |    3 +
>  4 files changed, 2219 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
> 
> +static int
> +cpfl_flow_js_pattern_key_attr(json_object *cjson_pr_key_attr, struct
> cpfl_flow_js_pr *js_pr)
> +{
> +	int i, len;
> +	struct cpfl_flow_js_pr_key_attr *attr;
> +
> +	len = json_object_array_length(cjson_pr_key_attr);
> +	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_pr_key_attr), 0);
> +	if (!js_pr->key.attributes) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_pr->key.attr_size = len;
> +	attr = js_pr->key.attributes;
> +	for (i = 0; i < len; i++) {
> +		json_object *object;
> +		const char *name;
> +		uint16_t value = 0;
> +		int ret;
> +
> +		object = json_object_array_get_idx(cjson_pr_key_attr, i);
> +		name = cpfl_json_object_to_string(object, "Name");
> +		if (!name) {
> +			rte_free(js_pr->key.attributes);
> +			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
> +			return -EINVAL;
[Liu, Mingxia] Better to use a goto statement here, as the other similar functions do?
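A minimal sketch of the goto-based cleanup this comment may have in mind, reusing the quoted function's variables (illustration only, not part of the patch):

		name = cpfl_json_object_to_string(object, "Name");
		if (!name) {
			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
			goto err;
		}
		ret = cpfl_json_object_to_uint16(object, "Value", &value);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
			goto err;
		}
		/* ... remaining per-attribute handling unchanged ... */
	}

	return 0;

err:
	rte_free(js_pr->key.attributes);
	return -EINVAL;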

> +		}
> +		ret = cpfl_json_object_to_uint16(object, "Value", &value);
> +		if (ret < 0) {
> +			rte_free(js_pr->key.attributes);
> +			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
> +			return -EINVAL;
> +		}
> +		if (strcmp(name, "ingress") == 0) {
> +			attr->ingress = value;
> +		} else if (strcmp(name, "egress") == 0) {
> +			attr->egress = value;
> +		} else {
> +			/* TODO: more... */
> +			rte_free(js_pr->key.attributes);
> +			PMD_DRV_LOG(ERR, "Not support attr name: %s.",
> name);
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
> +				     struct cpfl_flow_js_pr_key_proto *js_field)
> +{
> +	int len, i;
> +
> +	if (!cjson_field)
> +		return 0;
> +	len = json_object_array_length(cjson_field);
> +	js_field->fields_size = len;
> +	if (len == 0)
> +		return 0;
> +	js_field->fields =
> +	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len,
> 0);
> +	if (!js_field->fields) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	for (i = 0; i < len; i++) {
> +		json_object *object;
> +		const char *name, *mask;
> +
> +		object = json_object_array_get_idx(cjson_field, i);
> +		name = cpfl_json_object_to_string(object, "name");
> +		if (!name) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
> +			goto err;
> +		}
> +		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
> +			PMD_DRV_LOG(ERR, "The 'name' is too long.");
> +			goto err;
> +		}
> +		memcpy(js_field->fields[i].name, name, strlen(name));
> +
> +		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
> +			mask = cpfl_json_object_to_string(object, "mask");
> +			if (!mask) {
> +				PMD_DRV_LOG(ERR, "Can not parse string
> 'mask'.");
> +				goto err;
> +			}
> +			memcpy(js_field->fields[i].mask, mask, strlen(mask));
[Liu, Mingxia] Do we need to check the length and validity of 'mask' before copying it?
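A hypothetical sketch of the suggested check, mirroring the 'name' handling a few lines above (not part of the patch):

		if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
			PMD_DRV_LOG(ERR, "The 'mask' is too long.");
			goto err;
		}
		/* copy only after the length has been validated */
		memcpy(js_field->fields[i].mask, mask, strlen(mask));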

> +		} else {
> +			uint32_t mask_32b;
> +			int ret;
> +
> +			ret = cpfl_json_object_to_uint32(object, "mask",
> &mask_32b);
> +			if (ret < 0) {
> +				PMD_DRV_LOG(ERR, "Can not parse uint32
> 'mask'.");
> +				goto err;
> +			}
> +			js_field->fields[i].mask_32b = mask_32b;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_field->fields);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct
> cpfl_flow_js_pr *js_pr)
> +{
> +	int len, i, ret;
> +
> +	len = json_object_array_length(cjson_pr_key_proto);
> +	js_pr->key.proto_size = len;
> +	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_pr_key_proto) * len, 0);
> +	if (!js_pr->key.protocols) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < len; i++) {
> +		json_object *object, *cjson_pr_key_proto_fields;
> +		const char *type;
> +		enum rte_flow_item_type item_type;
> +
> +		object = json_object_array_get_idx(cjson_pr_key_proto, i);
> +		/* pr->key->proto->type */
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			goto err;
> +		}
> +		item_type = cpfl_get_item_type_by_str(type);
> +		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
> +			goto err;
> +		js_pr->key.protocols[i].type = item_type;
> +		/* pr->key->proto->fields */
> +		cjson_pr_key_proto_fields = json_object_object_get(object,
> "fields");
> +		ret =
> cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
> +							   &js_pr-
> >key.protocols[i]);
> +		if (ret < 0)
> +			goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_pr->key.protocols);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct
> cpfl_flow_js_fv *js_fv)
> +{
> +	uint16_t layer = 0, offset = 0, mask = 0;
> +	const char *header;
> +	enum rte_flow_item_type type;
> +	int ret;
> +
> +	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
> +		return -EINVAL;
> +	}
> +
> +	header = cpfl_json_object_to_string(cjson_value, "header");
> +	if (!header) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
> +		return -EINVAL;
> +	}
> +	js_fv->proto.layer = layer;
> +	js_fv->proto.offset = offset;
> +	js_fv->proto.mask = mask;
> +	type = cpfl_get_item_type_by_str(header);
> +	if (type == RTE_FLOW_ITEM_TYPE_VOID)
> +		return -EINVAL;
> +	js_fv->proto.header = type;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv_metadata(json_object *cjson_value, struct
> cpfl_flow_js_fv *js_fv)
> +{
> +	int ret;
> +
> +	ret = cpfl_json_object_to_uint16(cjson_value, "type", &js_fv-
> >meta.type);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
> +		return ret;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &js_fv-
> >meta.offset);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
> +		return ret;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &js_fv-
> >meta.mask);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct
> cpfl_flow_js_pr_action *js_act)
> +{
> +	int len, i;
> +
> +	len = json_object_array_length(cjson_fv);
> +	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len,
> 0);
> +	if (!js_act->sem.fv) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_act->sem.fv_size = len;
> +	for (i = 0; i < len; i++) {
> +		struct cpfl_flow_js_fv *js_fv;
> +		json_object *object, *cjson_value;
> +		uint16_t offset = 0;
> +		const char *type;
> +		int ret;
> +
> +		object = json_object_array_get_idx(cjson_fv, i);
> +		js_fv = &js_act->sem.fv[i];
> +		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +			goto err;
> +		}
> +		js_fv->offset = offset;
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			goto err;
> +		}
> +		cjson_value = json_object_object_get(object, "value");
> +		if (strcmp(type, "immediate") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
> +			js_fv->immediate = json_object_get_int(cjson_value);
> +		} else if (strcmp(type, "metadata") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_METADATA;
> +			cpfl_flow_js_pattern_act_fv_metadata(cjson_value,
> js_fv);
> +		} else if (strcmp(type, "protocol") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
> +			cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
> +		} else {
> +			PMD_DRV_LOG(ERR, "Not support this type: %s.",
> type);
> +			goto err;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_act->sem.fv);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct
> cpfl_flow_js_pr_action *js_act)
> +{
> +	const char *type;
> +	int ret;
> +
> +	/* pr->actions->type */
> +	type = cpfl_json_object_to_string(cjson_per_act, "type");
> +	if (!type) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +		return -EINVAL;
> +	}
> +	/* pr->actions->data */
> +	if (strcmp(type, "sem") == 0) {
> +		json_object *cjson_fv, *cjson_pr_action_sem;
> +
> +		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
> +		cjson_pr_action_sem = json_object_object_get(cjson_per_act,
> "data");
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "profile",
> +						 &js_act->sem.prof);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
> +			return -EINVAL;
> +		}
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "subprofile",
> +						 &js_act->sem.subprof);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
> +			return -EINVAL;
> +		}
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "keysize",
> +						 &js_act->sem.keysize);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
> +			return -EINVAL;
> +		}
> +		cjson_fv = json_object_object_get(cjson_pr_action_sem,
> "fieldvectors");
> +		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
> +		if (ret < 0)
> +			return ret;
> +	} else {
> +		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr
> *js_pr)
> +{
> +	int i, len, ret;
> +
> +	len = json_object_array_length(cjson_pr_act);
> +	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action)
> * len, 0);
> +	if (!js_pr->actions) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_pr->actions_size = len;
> +	for (i = 0; i < len; i++) {
> +		struct cpfl_flow_js_pr_action *js_act;
> +		json_object *object;
> +
> +		object = json_object_array_get_idx(cjson_pr_act, i);
> +		js_act = &js_pr->actions[i];
> +		ret = cpfl_flow_js_pattern_per_act(object, js_act);
> +		if (ret < 0) {
> +			rte_free(js_pr->actions);
> +			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_rule(json_object *json_root, struct cpfl_flow_js_parser
> *parser)
> +{
> +	json_object *cjson_pr;
> +	int i, len;
> +
> +	/* Pattern Rules */
> +	cjson_pr = json_object_object_get(json_root, "patterns");
> +	if (!cjson_pr) {
> +		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
> +		return -EINVAL;
> +	}
> +
> +	len = json_object_array_length(cjson_pr);
> +	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len,
> 0);
> +	if (!parser->patterns) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	parser->pr_size = len;
> +	for (i = 0; i < len; i++) {
> +		json_object *object, *cjson_pr_actions, *cjson_pr_key,
> *cjson_pr_key_proto,
> +		    *cjson_pr_key_attr;
> +		int ret;
> +
> +		object = json_object_array_get_idx(cjson_pr, i);
> +		/* pr->key */
> +		cjson_pr_key = json_object_object_get(object, "key");
> +		/* pr->key->protocols */
> +		cjson_pr_key_proto = json_object_object_get(cjson_pr_key,
> "protocols");
> +		ret = cpfl_flow_js_pattern_key_proto(cjson_pr_key_proto,
> &parser->patterns[i]);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
> +			goto err;
> +		}
> +		/* pr->key->attributes */
> +		cjson_pr_key_attr = json_object_object_get(cjson_pr_key,
> "attributes");
> +		ret = cpfl_flow_js_pattern_key_attr(cjson_pr_key_attr, &parser-
> >patterns[i]);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
> +			goto err;
> +		}
> +		/* pr->actions */
> +		cjson_pr_actions = json_object_object_get(object, "actions");
> +		ret = cpfl_flow_js_pattern_act(cjson_pr_actions, &parser-
> >patterns[i]);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
> +			goto err;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(parser->patterns);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key
> *js_mr_key)
> +{
> +	int len, i;
> +
> +	len = json_object_array_length(cjson_mr_key);
> +	js_mr_key->actions = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_mr_key_action) * len, 0);
> +	if (!js_mr_key->actions) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_mr_key->actions_size = len;
> +	for (i = 0; i < len; i++) {
> +		json_object *object, *cjson_mr_key_data;
> +		const char *type;
> +		enum rte_flow_action_type act_type;
> +
> +		object = json_object_array_get_idx(cjson_mr_key, i);
> +		/* mr->key->actions->type */
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			goto err;
> +		}
> +		act_type = cpfl_get_action_type_by_str(type);
> +		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
> +			goto err;
> +		js_mr_key->actions[i].type = act_type;
> +		/* mr->key->actions->data */
> +		cjson_mr_key_data = json_object_object_get(object, "data");
> +		if (js_mr_key->actions[i].type ==
> RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			json_object *cjson_mr_key_proto;
> +			int proto_size, j;
> +			struct cpfl_flow_js_mr_key_action_vxlan_encap
> *encap;
> +
> +			cjson_mr_key_proto =
> json_object_object_get(cjson_mr_key_data, "protocols");
> +			encap = &js_mr_key->actions[i].encap;
> +			if (!cjson_mr_key_proto) {
> +				encap->proto_size = 0;
> +				continue;
> +			}
> +			proto_size =
> json_object_array_length(cjson_mr_key_proto);
> +			encap->proto_size = proto_size;
> +			for (j = 0; j < proto_size; j++) {
> +				const char *s;
> +				json_object *subobject;
> +				enum rte_flow_item_type proto_type;
> +
> +				subobject =
> json_object_array_get_idx(cjson_mr_key_proto, j);
> +				s = json_object_get_string(subobject);
> +				proto_type = cpfl_get_item_type_by_str(s);
> +				if (proto_type ==
> RTE_FLOW_ITEM_TYPE_VOID) {
> +					PMD_DRV_LOG(ERR, "parse
> VXLAN_ENCAP failed.");
> +					goto err;
> +				}
> +				encap->protocols[j] = proto_type;
> +			}
> +		} else if (js_mr_key->actions[i].type !=
> RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
> +			PMD_DRV_LOG(ERR, "not support this type: %d.",
> js_mr_key->actions[i].type);
> +			goto err;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_mr_key->actions);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_mr_layout(json_object *cjson_layout, struct
> cpfl_flow_js_mr_action_mod *js_mod)
> +{
> +	int len, i;
> +
> +	len = json_object_array_length(cjson_layout);
> +	js_mod->layout_size = len;
> +	if (len == 0)
> +		return 0;
> +	js_mod->layout = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_mr_layout) * len, 0);
> +	if (!js_mod->layout) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < len; i++) {
> +		json_object *object;
> +		int index = 0, size = 0, offset = 0, ret;
> +		const char *hint;
> +
> +		object = json_object_array_get_idx(cjson_layout, i);
> +		ret = cpfl_json_object_to_int(object, "index", &index);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
> +			goto err;
> +		}
> +		js_mod->layout[i].index = index;
> +		ret = cpfl_json_object_to_int(object, "size", &size);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
> +			goto err;
> +		}
> +		js_mod->layout[i].size = size;
> +		ret = cpfl_json_object_to_int(object, "offset", &offset);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +			goto err;
> +		}
> +		js_mod->layout[i].offset = offset;
> +		hint = cpfl_json_object_to_string(object, "hint");
> +		if (!hint) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
> +			goto err;
> +		}
> +		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_mod->layout);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct
> cpfl_flow_js_mr_action *js_mr_act)
> +{
> +	json_object *cjson_mr_action_data;
> +	const char *type;
> +
> +	/* mr->action->type */
> +	type = cpfl_json_object_to_string(cjson_mr_act, "type");
> +	if (!type) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +		return -EINVAL;
> +	}
> +	/* mr->action->data */
> +	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
> +	if (strcmp(type, "mod") == 0) {
> +		json_object *layout;
> +		uint16_t profile = 0;
> +		int ret;
> +
> +		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
> +		ret = cpfl_json_object_to_uint16(cjson_mr_action_data,
> "profile", &profile);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
> +			return -EINVAL;
> +		}
> +		js_mr_act->mod.prof = profile;
> +		layout = json_object_object_get(cjson_mr_action_data,
> "layout");
> +		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse layout.");
> +			return ret;
> +		}
> +	} else  {
> +		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser
> *parser)
> +{
> +	json_object *cjson_mr;
> +	int i, len;
> +
> +	cjson_mr = json_object_object_get(json_root, "modifications");
> +	if (!cjson_mr) {
> +		PMD_DRV_LOG(INFO, "The modifications is optional.");
> +		return 0;
> +	}
> +	len = json_object_array_length(cjson_mr);
> +	parser->mr_size = len;
> +	if (len == 0)
> +		return 0;
> +	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr)
> * len, 0);
> +	if (!parser->modifications) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	for (i = 0; i < len; i++) {
> +		int ret;
> +		json_object *object, *cjson_mr_key, *cjson_mr_action,
> *cjson_mr_key_action;
> +
> +		object = json_object_array_get_idx(cjson_mr, i);
> +		/* mr->key */
> +		cjson_mr_key = json_object_object_get(object, "key");
> +		/* mr->key->actions */
> +		cjson_mr_key_action = json_object_object_get(cjson_mr_key,
> "actions");
> +		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser-
> >modifications[i].key);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "parse mr_key failed.");
> +			goto err;
> +		}
> +		/* mr->action */
> +		cjson_mr_action = json_object_object_get(object, "action");
> +		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser-
> >modifications[i].action);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "parse mr_action failed.");
> +			goto err;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(parser->modifications);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
> +{
> +	int ret = 0;
> +
> +	ret = cpfl_flow_js_pattern_rule(json_root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
> +		return ret;
> +	}
> +	ret = cpfl_flow_js_mod_rule(json_root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char
> *filename)
> +{
> +	struct cpfl_flow_js_parser *parser;
> +	json_object *root;
> +	int ret;
> +
> +	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser),
> 0);
> +	if (!parser) {
> +		PMD_DRV_LOG(ERR, "Not enough memory to create flow
> parser.");
> +		return -ENOMEM;
> +	}
> +	root = json_object_from_file(filename);
> +	if (!root) {
> +		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
> +		rte_free(parser);
> +		return -EINVAL;
[Liu, Mingxia] Better to use goto free_parser; here as well?

> +	}
> +	ret = cpfl_parser_init(root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parser init failed.");
> +		goto free_parser;
[Liu, Mingxia] If this fails, do we also need to call json_object_put(root)?
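A hypothetical sketch covering both error paths raised above (the missed goto and the leaked json_object), reusing the function's existing free_parser label; not part of the patch:

	root = json_object_from_file(filename);
	if (!root) {
		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
		goto free_parser;
	}
	ret = cpfl_parser_init(root, parser);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "parser init failed.");
		json_object_put(root);
		goto free_parser;
	}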

> +	}
> +	*flow_parser = parser;
> +	ret = json_object_put(root);
> +	if (ret != 1) {
> +		PMD_DRV_LOG(ERR, "Free json_object failed.");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +free_parser:
> +	rte_free(parser);
> +	return -EINVAL;
> +}
> +
> +static void
> +cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
> +{
> +	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
> +		rte_free(pr_act->sem.fv);
> +}
> +
> +int
> +cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
> +{
> +	int i, j;
> +
> +	if (!parser)
> +		return 0;
> +
> +	for (i = 0; i < parser->pr_size; i++) {
> +		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
> +
> +		if (!pattern)
> +			return -EINVAL;
> +		for (j = 0; j < pattern->key.proto_size; j++)
> +			rte_free(pattern->key.protocols[j].fields);
> +		rte_free(pattern->key.protocols);
> +		rte_free(pattern->key.attributes);
> +
> +		for (j = 0; j < pattern->actions_size; j++) {
> +			struct cpfl_flow_js_pr_action *pr_act;
> +
> +			pr_act = &pattern->actions[j];
> +			cpfl_parser_free_pr_action(pr_act);
> +		}
> +		rte_free(pattern->actions);
> +	}
> +	rte_free(parser->patterns);
> +	for (i = 0; i < parser->mr_size; i++) {
> +		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
> +
> +		if (!mr)
> +			return -EINVAL;
> +		rte_free(mr->key.actions);
> +		rte_free(mr->action.mod.layout);
> +	}
> +	rte_free(parser->modifications);
> +	rte_free(parser);
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_get_items_length(const struct rte_flow_item *items)
> +{
> +	int length = 0;
> +	const struct rte_flow_item *item = items;
> +
> +	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
> +		continue;
> +	return length;
> +}
> +
> +static int
> +cpfl_get_actions_length(const struct rte_flow_action *actions)
> +{
> +	int length = 0;
> +	const struct rte_flow_action *action = actions;
> +
> +	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
> +		continue;
> +	return length;
> +}
> +
> +static int
> +cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
> *items,
> +		       uint16_t offset, uint8_t *fv)
> +{
> +	uint16_t v_layer, v_offset, v_mask;
> +	enum rte_flow_item_type v_header;
> +	int j, layer, length;
> +	uint16_t temp_fv;
> +
> +	length = cpfl_get_items_length(items);
> +	v_layer = js_fv->proto.layer;
> +	v_header = js_fv->proto.header;
> +	v_offset = js_fv->proto.offset;
> +	v_mask = js_fv->proto.mask;
> +	layer = 0;
> +	for (j = 0; j < length - 1; j++) {
> +		if (items[j].type == v_header) {
> +			if (layer == v_layer) {
> +				/* copy out 16 bits from offset */
> +				const uint8_t *pointer;
> +
> +				pointer = &(((const uint8_t
> *)(items[j].spec))[v_offset]);
> +				temp_fv = ntohs((*((const uint16_t *)pointer))
> & v_mask);
> +				fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >>
> 8);
[Liu, Mingxia] ((temp_fv & 0xff00) >> 8) can be simplified to (temp_fv >> 8); there are other places with the same issue.
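A standalone illustration of the simplification (hypothetical values, assumes <stdint.h>):

	uint16_t temp_fv = 0xabcd;
	uint8_t hi1 = (uint8_t)((temp_fv & 0xff00) >> 8); /* 0xab */
	uint8_t hi2 = (uint8_t)(temp_fv >> 8);            /* also 0xab: the mask is redundant for a uint16_t */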

> +				fv[2 * offset + 1] = (uint8_t)(temp_fv &
> 0x00ff);
> +				break;
> +			}
> +			layer++;
> +		} /* TODO: more type... */
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int
> size,
> +			uint8_t *fv, const struct rte_flow_item *items)
> +{
> +	int i, ret;
> +
> +	for (i = 0; i < size; i++) {
> +		uint16_t offset, temp_fv, value_int;
> +		enum cpfl_flow_js_fv_type type;
> +		struct cpfl_flow_js_fv *js_fv;
> +
> +		js_fv = &js_fvs[i];
> +		offset = js_fv->offset;
> +		type = js_fv->type;
> +		if (type == CPFL_FV_TYPE_IMMEDIATE) {
> +			value_int = js_fv->immediate;
> +			temp_fv = (value_int << 8) & 0xff00;
> +			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
> +			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
> +		} else if (type == CPFL_FV_TYPE_METADATA) {
> +			uint16_t type, v_offset, mask;
> +
> +			type = js_fv->meta.type;
> +			v_offset = js_fv->meta.offset;
> +			mask = js_fv->meta.mask;
> +			temp_fv = cpfl_metadata_read16(&itf->adapter->meta,
> type, v_offset) & mask;
> +			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
> +			fv[2 * offset + 1] = (uint8_t)((temp_fv & 0xff00) >> 8);
> +		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
> +			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
> +			if (ret)
> +				return ret;
> +		} else {
> +			PMD_DRV_LOG(DEBUG, "not support this type: %d.",
> type);
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_parse_pr_actions(struct cpfl_itf *itf,
> +		      struct cpfl_flow_js_pr_action *actions,
> +		      int size,
> +		      const struct rte_flow_item *items,
> +		      const struct rte_flow_attr *attr,
> +		      struct cpfl_flow_pr_action *pr_action)
> +{
> +	int i, ret;
> +
> +	for (i = 0; i < size; i++) {
> +		struct cpfl_flow_js_pr_action *pr_act;
> +		enum cpfl_flow_pr_action_type type;
> +
> +		pr_act = &actions[i];
> +		/* pr->actions->type */
> +		type = pr_act->type;
> +		/* pr->actions->data */
> +		if (attr->group % 10 == 1  && type ==
> CPFL_JS_PR_ACTION_TYPE_SEM) {
> +			struct cpfl_flow_js_pr_action_sem *sem = &pr_act-
> >sem;
> +
> +			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
> +			pr_action->sem.prof = sem->prof;
> +			pr_action->sem.subprof = sem->subprof;
> +			pr_action->sem.keysize = sem->keysize;
> +			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
> +			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
> +			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
> +						      pr_action-
> >sem.cpfl_flow_pr_fv, items);
> +			return ret;
> +		} else if (attr->group > 4 || attr->group == 0) {
> +			return -EPERM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_str2mac(const char *mask, uint8_t *addr_bytes)
> +{
> +	int i, size, j;
> +	uint8_t n;
> +
> +	size = strlen(mask);
> +	n = 0;
> +	j = 0;
> +	for (i = 0; i < size; i++) {
> +		char ch = mask[i];
> +
> +		if (ch == ':') {
> +			if (j >= RTE_ETHER_ADDR_LEN)
> +				return -EINVAL;
> +			addr_bytes[j++] = n;
> +			n = 0;
> +		} else if (ch >= 'a' && ch <= 'f') {
> +			n = n * 16 + ch - 'a' + 10;
> +		} else if (ch >= 'A' && ch <= 'F') {
> +			n = n * 16 + ch - 'A' + 10;
> +		} else if (ch >= '0' && ch <= '9') {
> +			n = n * 16 + ch - '0';
> +		} else {
> +			return -EINVAL;
> +		}
> +	}
> +	if (j < RTE_ETHER_ADDR_LEN)
> +		addr_bytes[j++] = n;
> +
> +	if (j != RTE_ETHER_ADDR_LEN)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_eth_mask(const char *mask, const uint8_t
> addr_bytes[RTE_ETHER_ADDR_LEN])
> +{
> +	int i, ret;
> +	uint8_t mask_bytes[RTE_ETHER_ADDR_LEN] = { 0 };
> +
> +	ret = cpfl_str2mac(mask, mask_bytes);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "translate mac address from string to
> uint8_t[] failed.");
> +		return -EINVAL;
> +	}
> +	/* validate eth mask addr if match */
> +	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
> +		if (mask_bytes[i] != addr_bytes[i])
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
> +{
> +	uint32_t out_addr;
> +
> +	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
> +	int ret = inet_pton(AF_INET, mask, &out_addr);
> +
> +	if (ret < 0)
> +		return -EINVAL;
> +	/* validate ipv4 mask addr if match */
> +	if (out_addr != addr)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_eth *eth_mask)
> +{
> +	int field_size, j;
> +	int flag_dst_addr, flag_src_addr, flag_ether_type;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !eth_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && eth_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && !eth_mask)
> +		return 0;
> +
> +	flag_dst_addr = false;
> +	flag_src_addr = false;
> +	flag_ether_type = false;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name, *s_mask;
> +
> +		field = &proto->fields[j];
> +		/* match: rte_flow_item_eth.dst, more see Field Mapping
> +		 */
> +		name = field->name;
> +		/* match: rte_flow_item->mask */
> +		if (strcmp(name, "src_addr") == 0) {
> +			s_mask = field->mask;
> +			if (cpfl_check_eth_mask(s_mask, eth_mask-
> >src.addr_bytes) < 0)
> +				return -EINVAL;
> +			flag_src_addr = true;
> +		} else if (strcmp(name, "dst_addr") == 0) {
> +			s_mask = field->mask;
> +			if (cpfl_check_eth_mask(s_mask, eth_mask-
> >dst.addr_bytes) < 0)
> +				return -EINVAL;
> +			flag_dst_addr = true;
> +		} else if (strcmp(name, "ether_type") == 0) {
> +			uint16_t mask = (uint16_t)field->mask_32b;
> +
> +			if (mask != eth_mask->type)
> +				return -EINVAL;
> +			flag_ether_type = true;
> +		} else {
> +			/* TODO: more type... */
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +	if (!flag_src_addr) {
> +		if (strcmp((const char *)eth_mask->src.addr_bytes,
> "\x00\x00\x00\x00\x00\x00") != 0)
> +			return -EINVAL;
> +	}
> +	if (!flag_dst_addr) {
> +		if (strcmp((const char *)eth_mask->dst.addr_bytes,
> "\x00\x00\x00\x00\x00\x00") != 0)
> +			return -EINVAL;
> +	}
> +	if (!flag_ether_type) {
> +		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_ipv4 *ipv4_mask)
> +{
> +	int field_size, j;
> +	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !ipv4_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && ipv4_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && !ipv4_mask)
> +		return 0;
> +
> +	flag_dst_addr = false;
> +	flag_src_addr = false;
> +	flag_next_proto_id = false;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name;
> +
> +		field = &proto->fields[j];
> +		name = field->name;
> +		if (strcmp(name, "src_addr") == 0) {
> +			/* match: rte_flow_item->mask */
> +			const char *mask;
> +
> +			mask = field->mask;
> +			if (cpfl_check_ipv4_mask(mask, ipv4_mask-
> >hdr.src_addr) < 0)
> +				return -EINVAL;
> +			flag_src_addr = true;
> +		} else if (strcmp(name, "dst_addr") == 0) {
> +			const char *mask;
> +
> +			mask = field->mask;
> +			if (cpfl_check_ipv4_mask(mask, ipv4_mask-
> >hdr.dst_addr) < 0)
> +				return -EINVAL;
> +			flag_dst_addr = true;
> +		} else if (strcmp(name, "next_proto_id") == 0) {
> +			uint8_t mask;
> +
> +			mask = (uint8_t)field->mask_32b;
> +			if (mask != ipv4_mask->hdr.next_proto_id)
> +				return -EINVAL;
> +			flag_next_proto_id = true;
> +		} else {
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +	if (!flag_src_addr) {
> +		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
> +			return -EINVAL;
> +	}
> +	if (!flag_dst_addr) {
> +		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
> +			return -EINVAL;
> +	}
> +	if (!flag_next_proto_id) {
> +		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_tcp *tcp_mask)
> +{
> +	int field_size, j;
> +	int flag_src_port, flag_dst_port;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !tcp_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && tcp_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && !tcp_mask)
> +		return 0;
> +
> +	flag_src_port = false;
> +	flag_dst_port = false;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name;
> +		uint16_t mask;
> +
> +		field = &proto->fields[j];
> +		/* match: rte_flow_item_eth.dst */
> +		name = field->name;
> +		/* match: rte_flow_item->mask */
> +		mask = (uint16_t)field->mask_32b;
> +		if (strcmp(name, "src_port") == 0) {
> +			if (tcp_mask->hdr.src_port != mask)
> +				return -EINVAL;
> +			flag_src_port = true;
> +		} else if (strcmp(name, "dst_port") == 0) {
> +			if (tcp_mask->hdr.dst_port != mask)
> +				return -EINVAL;
> +			flag_dst_port = true;
> +		} else {
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +	if (!flag_src_port) {
> +		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +	if (!flag_dst_port) {
> +		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_udp *udp_mask)
> +{
> +	int field_size, j;
> +	bool flag_src_port, flag_dst_port;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !udp_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && udp_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && !udp_mask)
> +		return 0;
> +	flag_src_port = false;
> +	flag_dst_port = false;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name;
> +		uint16_t mask;
> +
> +		field = &proto->fields[j];
> +		/* match: rte_flow_item_eth.dst */
> +		name = field->name; /* match: rte_flow_item->mask */
> +		mask = (uint16_t)field->mask_32b;
> +		if (strcmp(name, "src_port") == 0) {
> +			if (udp_mask->hdr.src_port != mask)
> +				return -EINVAL;
> +			flag_src_port = true;
> +		} else if (strcmp(name, "dst_port") == 0) {
> +			if (udp_mask->hdr.dst_port != mask)
> +				return -EINVAL;
> +			flag_dst_port = true;
> +		} else {
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +	if (!flag_src_port) {
> +		if (udp_mask->hdr.src_port != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +	if (!flag_dst_port) {
> +		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
> +		 const struct rte_flow_item_vxlan *vxlan_mask)
> +{
> +	int field_size, j;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !vxlan_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && vxlan_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && !vxlan_mask)
> +		return 0;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name;
> +		int64_t mask;
> +
> +		field = &proto->fields[j];
> +		name = field->name;
> +		/* match: rte_flow_item->mask */
> +		mask = (int64_t)field->mask_32b;
> +		if (strcmp(name, "vx_vni") == 0) {
> +			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) !=
> mask)
> +				return -EINVAL;
> +		} else {
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_icmp *icmp_mask)
> +{
> +	int field_size;
> +
> +	if (!proto)
> +		return 0;
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !icmp_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && icmp_mask)
> +		return -EINVAL;
> +	if (field_size == 0 && !icmp_mask)
> +		return 0;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
> +			     int proto_size,
> +			     const struct rte_flow_item *items)
> +{
> +	int i, length;
> +	int j = 0;
> +
> +	length = cpfl_get_items_length(items);
> +	if (proto_size > length - 1)
> +		return -EINVAL;
> +	for (i = 0; i < proto_size; i++) {
> +		struct cpfl_flow_js_pr_key_proto *key_proto;
> +		enum rte_flow_item_type type;
> +
> +		key_proto = &protocols[i];
> +		/* pr->key->proto->type */
> +		type = key_proto->type;
> +		/* pr->key->proto->fields */
> +		switch (type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
> +				const struct rte_flow_item_eth *eth_mask;
> +				int ret;
> +
> +				eth_mask = (const struct rte_flow_item_eth
> *)items[i].mask;
> +				ret = cpfl_check_eth(key_proto, eth_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
> +				const struct rte_flow_item_ipv4 *ipv4_mask;
> +				int ret;
> +
> +				ipv4_mask = (const struct rte_flow_item_ipv4
> *)items[i].mask;
> +				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
> +				const struct rte_flow_item_tcp *tcp_mask;
> +				int ret;
> +
> +				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
> +				ret = cpfl_check_tcp(key_proto, tcp_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
> +				const struct rte_flow_item_udp *udp_mask;
> +				int ret;
> +
> +				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
> +				ret = cpfl_check_udp(key_proto, udp_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
> +				const struct rte_flow_item_vxlan *vxlan_mask;
> +				int ret;
> +
> +				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
> +				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_ICMP:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
> +				const struct rte_flow_item_icmp *icmp_mask;
> +				int ret;
> +
> +				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
> +				ret = cpfl_check_icmp(key_proto, icmp_mask);
> +				if (ret < 0)
> +					return ret;
> +
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;
> +		default:
> +			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
> +			return -EPERM;
> +		}
> +	}
> +	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
> +			    const struct rte_flow_attr *attr)
> +{
> +	/* match: struct rte_flow_attr(ingress,egress) */
> +	if (key_attr->ingress != attr->ingress) {
> +		PMD_DRV_LOG(DEBUG, "ingress not match.");
> +		return -EINVAL;
> +	}
> +	if (key_attr->egress != attr->egress) {
> +		PMD_DRV_LOG(DEBUG, "egress not match.");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
> +		       const struct rte_flow_item *items,
> +		       const struct rte_flow_attr *attr)
> +{
> +	int ret;
> +
> +	/* pr->key */
> +	/* pr->key->protocols */
> +	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
> +					   pattern->key.proto_size, items);
> +	if (ret < 0)
> +		return -EINVAL;
> +	/* pr->key->attributes */
> +	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
> +	if (ret < 0)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +/* output: struct cpfl_flow_pr_action* pr_action */
> +static int
> +cpfl_parse_pattern_rules(struct cpfl_itf *itf,
> +			 struct cpfl_flow_js_parser *parser,
> +			 const struct rte_flow_item *items,
> +			 const struct rte_flow_attr *attr,
> +			 struct cpfl_flow_pr_action *pr_action)
> +{
> +	int i, size;
> +	struct cpfl_flow_js_pr *pattern;
> +
> +	size = parser->pr_size;
> +	for (i = 0; i < size; i++) {
> +		int ret;
> +
> +		pattern = &parser->patterns[i];
> +		ret = cpfl_check_pattern_key(pattern, items, attr);
> +		if (ret < 0)
> +			continue;
> +		/* pr->actions */
> +		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
> +					    items, attr, pr_action);
> +		return ret;
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +int
> +cpfl_flow_parse_items(struct cpfl_itf *itf,
> +		      struct cpfl_flow_js_parser *parser,
> +		      const struct rte_flow_item *items,
> +		      const struct rte_flow_attr *attr,
> +		      struct cpfl_flow_pr_action *pr_action)
> +{
> +	int ret;
> +
> +	/* Pattern Rules */
> +	ret = cpfl_parse_pattern_rules(itf, parser, items, attr, pr_action);
> +	return ret;
> +}
> +
> +/* modifications rules */
> +static int
> +cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
> +			       const struct rte_flow_action *action)
> +{
> +	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
> +	struct rte_flow_item *definition;
> +	int def_length, i, proto_size;
> +
> +	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
> +	definition = action_vxlan_encap->definition;
> +	def_length = cpfl_get_items_length(definition);
> +	proto_size = encap->proto_size;
> +	if (proto_size != def_length - 1) {
> +		PMD_DRV_LOG(DEBUG, "protocols not match.");
> +		return -EINVAL;
> +	}
> +	for (i = 0; i < proto_size; i++) {
> +		enum rte_flow_item_type proto;
> +
> +		proto = encap->protocols[i];
> +		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
> +			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
> +				PMD_DRV_LOG(DEBUG, "protocols not match.");
> +				return -EINVAL;
> +			}
> +		} else if (proto != definition[i].type) {
> +			PMD_DRV_LOG(DEBUG, "protocols not match.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +/* output: struct cpfl_flow_mr_key_action *mr_key_action */
> +/* check and parse */
> +static int
> +cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
> +			 const struct rte_flow_action *actions,
> +			 struct cpfl_flow_mr_key_action *mr_key_action)
> +{
> +	int actions_length, i;
> +	int j = 0;
> +	int ret;
> +
> +	actions_length = cpfl_get_actions_length(actions);
> +	if (size > actions_length - 1)
> +		return -EINVAL;
> +	for (i = 0; i < size; i++) {
> +		enum rte_flow_action_type type;
> +		struct cpfl_flow_js_mr_key_action *key_act;
> +
> +		key_act = &key_acts[i];
> +		/* mr->key->actions->type */
> +		type = key_act->type;
> +		/* mr->key->actions->data */
> +		/* match: <type> action matches RTE_FLOW_ACTION_TYPE_<type> */
> +		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			int proto_size, k;
> +			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
> +
> +			while (j < actions_length &&
> +			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +				j++;
> +			}
> +			if (j >= actions_length)
> +				return -EINVAL;
> +			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
> +			mr_key_action[i].encap.action = &actions[j];
> +			encap = &mr_key_action[i].encap;
> +
> +			proto_size = key_act->encap.proto_size;
> +			encap->proto_size = proto_size;
> +			for (k = 0; k < proto_size; k++) {
> +				enum rte_flow_item_type proto;
> +
> +				proto = key_act->encap.protocols[k];
> +				encap->protocols[k] = proto;
> +			}
> +			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
> +			if (ret < 0)
> +				return -EINVAL;
> +
> +			j++;
> +		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
> +			while (j < actions_length &&
> +			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
> +				j++;
> +			}
> +			if (j >= actions_length)
> +				return -EINVAL;
> +
> +			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
> +			j++;
> +		} else {
> +			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
> +			return -EPERM;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +/* output: uint8_t *buffer, uint16_t *byte_len */
> +static int
> +cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
> +		  struct cpfl_flow_mr_key_action *mr_key_action,
> +		  uint8_t *buffer, uint16_t *byte_len)
> +{
> +	int i;
> +	int start = 0;
> +
> +	for (i = 0; i < layout_size; i++) {
> +		int index, size, offset;
> +		const char *hint;
> +		const uint8_t *addr;
> +		struct cpfl_flow_mr_key_action *temp;
> +		struct cpfl_flow_js_mr_layout *layout;
> +
> +		layout = &layouts[i];
> +		/* index links to the element of the actions array. */
> +		index = layout->index;
> +		size = layout->size;
> +		offset = layout->offset;
> +		if (index == -1) {
> +			hint = "dummy";
> +			start += size;
> +			continue;
> +		}
> +		hint = layout->hint;
> +		addr = NULL;
> +		temp = mr_key_action + index;
> +		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
> +			struct rte_flow_item *definition;
> +			int def_length, k;
> +
> +			action_vxlan_encap =
> +			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
> +			definition = action_vxlan_encap->definition;
> +			def_length = cpfl_get_items_length(definition);
> +			for (k = 0; k < def_length - 1; k++) {
> +				if ((strcmp(hint, "eth") == 0 &&
> +				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
> +				    (strcmp(hint, "ipv4") == 0 &&
> +				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
> +				    (strcmp(hint, "udp") == 0 &&
> +				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
> +				    (strcmp(hint, "tcp") == 0 &&
> +				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
> +				    (strcmp(hint, "vxlan") == 0 &&
> +				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
> +					addr = (const uint8_t *)(definition[k].spec);
> +					if (start > 255) {
> +						*byte_len = 0;
> +						PMD_DRV_LOG(ERR, "byte length is too long%s",
> +							    hint);
> +						return -EINVAL;
> +					}
> +					memcpy(buffer + start, addr + offset, size);
> +					break;
> +				} /* TODO: more hint... */
> +			}
> +			if (k == def_length - 1) {
> +				*byte_len = 0;
> +				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
> +				return -EINVAL;
> +			}
> +		} else {
> +			*byte_len = 0;
> +			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
> +			return -EINVAL;
> +		}
> +		/* else TODO: more type... */
> +
> +		start += size;
> +	}
> +	*byte_len = start;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
> +		     struct cpfl_flow_mr_key_action *mr_key_action,
> +		     struct cpfl_flow_mr_action *mr_action)
> +{
> +	enum cpfl_flow_mr_action_type type;
> +
> +	/* mr->action->type */
> +	type = action->type;
> +	/* mr->action->data */
> +	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
> +		struct cpfl_flow_js_mr_layout *layout;
> +
> +		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
> +		mr_action->mod.byte_len = 0;
> +		mr_action->mod.prof = action->mod.prof;
> +		layout = action->mod.layout;
> +		if (layout) {
> +			int ret;
> +
> +			memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
> +			ret = cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
> +						mr_action->mod.data, &mr_action->mod.byte_len);
> +			if (ret < 0)
> +				return -EINVAL;
> +		}
> +		return 0;
> +	}
> +	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
> +
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
> +		   struct cpfl_flow_mr_key_action *mr_key_action)
> +{
> +	int key_action_size;
> +
> +	/* mr->key->actions */
> +	key_action_size = mr->key.actions_size;
> +	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
> +}
> +
> +/* output: struct cpfl_flow_mr_action *mr_action */
> +static int
> +cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
> +		     struct cpfl_flow_mr_action *mr_action)
> +{
> +	int i, size;
> +	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
> +
> +	size = parser->mr_size;
> +
> +	for (i = 0; i < size; i++) {
> +		int ret;
> +		struct cpfl_flow_js_mr *mr;
> +
> +		mr = &parser->modifications[i];
> +		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
> +		if (ret < 0)
> +			continue;
> +		/* mr->action */
> +		ret = cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
> +		if (!ret)
> +			return 0;
> +	}
> +
> +	return -EINVAL;
> +}
> +
> +int
> +cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
> +			struct cpfl_flow_mr_action *mr_action)
> +{
> +	/* modifications rules */
> +	if (!parser->modifications) {
> +		PMD_DRV_LOG(INFO, "The modification rules are optional.");
> +		return 0;
> +	}
> +
> +	return cpfl_parse_mod_rules(parser, actions, mr_action);
> +}
> +
> +void
> +cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
> +{
> +	rte_memcpy(&meta->chunks[type].data[offset],
> +		   &data,
> +		   sizeof(uint16_t));
> +}
> +
> +void
> +cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
> +{
> +	rte_memcpy(&meta->chunks[type].data[offset],
> +		   &data,
> +		   sizeof(uint32_t));
> +}
> +
> +uint16_t
> +cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
> +{
> +	return *((uint16_t *)(&meta->chunks[type].data[offset]));
> +}
> +
> +bool
> +cpfl_metadata_write_port_id(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 0;
> +	const int offset = 5;
> +
> +	dev_id = cpfl_get_port_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
> +		return false;
> +	}
> +	dev_id = dev_id << 3;
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
[Liu, Mingxia] better to change the type of 'dev_id' to  uint16_t?
> +
> +	return true;
> +}
> +
> +bool
> +cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 6;
> +	const int offset = 2;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	dev_id = dev_id << 1;
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
[Liu, Mingxia] better to change the type of 'dev_id' to  uint16_t?

> +	return true;
> +}
> +
> +bool
> +cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 6;
> +	const int offset = 0;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
[Liu, Mingxia] better to change the type of 'dev_id' to  uint16_t?
> +	return true;
> +}
> +
> +void
> +cpfl_metadata_init(struct cpfl_metadata *meta)
> +{
> +	int i;
> +
> +	for (i = 0; i < CPFL_META_LENGTH; i++)
> +		meta->chunks[i].type = i;
> +}
> +
> +bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 0;
> +	const int offset = 24;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
[Liu, Mingxia] better to change the type of 'dev_id' to  uint16_t?

> +	return true;
> +}
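
For illustration, the suggested change would make dev_id a uint16_t and fold the
shift into the cpfl_metadata_write16() call; this is essentially how the v10
revision later in this thread ends up writing it:

bool
cpfl_metadata_write_port_id(struct cpfl_itf *itf)
{
	uint16_t dev_id;
	const int type = 0;
	const int offset = 5;

	dev_id = cpfl_get_port_id(itf);
	if (dev_id == CPFL_INVALID_HW_ID) {
		PMD_DRV_LOG(ERR, "fail to get hw ID");
		return false;
	}
	/* no 32-bit intermediate needed: shift while passing the value */
	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);

	return true;
}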


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 0/9] add rte flow support for cpfl
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
@ 2023-09-08 16:05               ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
                                   ` (8 more replies)
  2023-09-28  8:44               ` [PATCH v9 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
                                 ` (12 subsequent siblings)
  13 siblings, 9 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: parse flow offloading hint from JSON
  net/cpfl: build action mapping rules from JSON

Yuying Zhang (7):
  net/cpfl: set up flow offloading skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: implement FXP rule creation and destroying
  net/cpfl: adapt FXP to flow engine
  net/cpfl: support flow ops on representor
  net/cpfl: support represented port action
---
v10:
* fix ci build issue

v9:
* refine rx queue message process function

v8:
* fix compile issues
* refine document and separate patch with different features

v7:
* refine commit log
* fix compile issues

v6:
* use existed jansson instead of json-c library
* refine "add FXP low level implementation"

V5:
* Add input validation for some functions

 doc/guides/nics/cpfl.rst                |   52 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  666 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1835 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  268 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  263 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  127 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6448 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
                                   ` (7 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" to specify the path of a JSON
configure file. The cpfl PMD use the JSON configuration file
to translate rte_flow tokens into low level hardware
representation.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

The jansson library is used to parse the JSON syntax.

In this patch, the parser only builds rules that map from
a set of rte_flow items to hardware representations. The rules
that map from rte_flow actions will be enabled in a separate
patch to keep this patch at a reviewable size.

Note, the JSON configuration file is provided by the hardware vendor
and is intended to work exclusively with a specific P4 pipeline
configuration, which must be compiled and programmed into the hardware.

The format of the JSON file strictly follows the internal specifications
of the hardware vendor and is not meant to be modified directly by
users.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst            |   38 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1299 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  168 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1625 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..e17347d15c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,32 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+
+  The JSON configuration file is provided by the hardware vendor and is intended to work
+  exclusively with a specific P4 pipeline configuration, which must be compiled and programmed
+  into the hardware.
+
+  The format of the JSON file strictly follows the internal specifications of the hardware
+  vendor and is not meant to be modified directly by users.
+
+  Using the ``devargs`` option ``flow_parser``, the user can specify the path
+  of a JSON file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the JSON file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +184,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The cpfl PMD uses a JSON file to direct the parsing of rte_flow tokens into
+low-level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using `apt install libjansson-dev`
+
+- Run testpmd with the JSON file:
+
+  .. code-block:: console
+
+     dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
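
As a rough sketch of how the pieces introduced by this patch fit together: the
devargs path ends up in cpfl_devargs.flow_parser, cpfl_parser_create() turns the
JSON file into a parser object, and a flow engine later feeds rte_flow items
through cpfl_flow_parse_items(). The adapter-level field used below
(adapter->flow_parser) is only a placeholder for wherever the driver keeps the
handle; it is not defined by this patch.

/* Illustrative glue only; cpfl_parser_create() and cpfl_flow_parse_items()
 * are the functions added by this patch, the rest is assumed plumbing.
 */
static int
cpfl_flow_parser_setup(struct cpfl_adapter_ext *adapter, struct cpfl_devargs *devargs)
{
	if (devargs->flow_parser[0] == '\0')
		return 0; /* devarg not given: rte_flow offload stays disabled */

	return cpfl_parser_create(&adapter->flow_parser, devargs->flow_parser);
}

static int
cpfl_translate_pattern(struct cpfl_itf *itf, struct cpfl_flow_js_parser *parser,
		       const struct rte_flow_item *items,
		       const struct rte_flow_attr *attr,
		       struct cpfl_flow_pr_action *pr_action)
{
	/* on success, *pr_action holds the profile and field vector to program */
	return cpfl_flow_parse_items(itf, parser, items, attr, pr_action);
}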
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	adapter = itf->adapter;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..a5fff5a857
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+
+#include "cpfl_flow_parser.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint16_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint32_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	json_decref(root);
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst; see Field Mapping for details
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_udp fields by name */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..268e1bc89f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/*  a 16 bits value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 2/9] net/cpfl: build action mapping rules from JSON
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
                                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Build rules that map an rte_flow action vxlan_encap or
vxlan_decap to hardware representations.
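
Not part of the patch, just for illustration: a minimal sketch of how a
caller could feed a user's action list into the new parser and read back
the resulting modification profile. The parser handle, the action array
and the function name below are hypothetical, and error handling is kept
to a minimum.

#include <rte_flow.h>
#include "cpfl_flow_parser.h"

/* Illustrative sketch only: translate a vxlan_decap action list through
 * the JSON-defined modification rules. Assumes 'parser' was created
 * earlier with cpfl_parser_create().
 */
static int
example_translate_decap(struct cpfl_flow_js_parser *parser)
{
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct cpfl_flow_mr_action mr_action = { 0 };
	int ret;

	/* Walks parser->modifications; on a key match the profile ID and
	 * the composed MOD memory region are filled into mr_action.
	 */
	ret = cpfl_flow_parse_actions(parser, actions, &mr_action);
	if (ret < 0)
		return ret;

	/* mr_action.mod.prof and mr_action.mod.data are now ready to be
	 * programmed through the rule module added later in this series.
	 */
	return 0;
}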

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 538 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 637 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index a5fff5a857..0e623494a2 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -28,6 +28,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -46,6 +58,29 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (int)json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -518,6 +553,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint) + 1);
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "Unsupported action type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -528,6 +785,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -598,6 +860,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -614,6 +885,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -642,7 +924,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1231,6 +1513,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummpy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start + size > 256) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 268e1bc89f..962667adc2 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -106,9 +106,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the 'protocols'
+ * field of 'data'.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where to copy the data from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that helps
+ * the driver compose the MOD memory region when the action needs to insert/update some packet
+ * data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that helps the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -126,6 +196,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -133,6 +230,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
                                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.
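
As a rough illustration (not part of this patch): how a backend engine is
expected to plug into this skeleton. The engine below is a do-nothing
stand-in with made-up names; the real FXP engine arrives later in the
series and its callbacks differ.

#include <errno.h>
#include <rte_common.h>
#include "cpfl_flow.h"

/* Illustrative stub engine: never claims a flow. */
static int
example_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static int
example_parse(struct rte_eth_dev *dev __rte_unused,
	      const struct rte_flow_attr *attr __rte_unused,
	      const struct rte_flow_item pattern[] __rte_unused,
	      const struct rte_flow_action actions[] __rte_unused,
	      void **meta __rte_unused)
{
	/* Return 0 to claim the flow, a negative value to let the next
	 * registered engine try.
	 */
	return -ENOTSUP;
}

static struct cpfl_flow_engine example_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = example_engine_init,
	.parse_pattern_action = example_parse,
};

/* Constructor-time registration puts the engine on the global list
 * before cpfl_flow_engine_init() walks it during adapter init.
 */
RTE_INIT(example_engine_reg)
{
	cpfl_flow_engine_register(&example_engine);
}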

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+            'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 4/9] net/cpfl: set up control path
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (2 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
                                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up a dedicated vport with 4 pairs of control queues for flow offloading.
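
For illustration only (not part of the patch), a minimal sketch of how one
config TX control queue could be described to cpfl_ctlq_add(). The queue
id is made up, and in the real driver the descriptor ring DMA region and
the register offsets in qinfo.reg come from the CP.

#include "cpfl_controlq.h"

/* Illustrative sketch only: add one config TX queue. 'ring' must point
 * to a CP-provided DMA region of CPFL_CFGQ_RING_LEN *
 * sizeof(struct idpf_ctlq_desc) bytes, and 'hw' must already be
 * initialized by the base driver.
 */
static int
example_add_cfg_txq(struct idpf_hw *hw, struct idpf_dma_mem *ring,
		    struct idpf_ctlq_info **cfg_txq)
{
	struct cpfl_ctlq_create_info qinfo = { 0 };

	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_TX;
	qinfo.id = 0;				/* hypothetical queue id */
	qinfo.len = CPFL_CFGQ_RING_LEN;
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	qinfo.ring_mem = *ring;			/* descriptor ring from the CP */
	/* TX config queues carry no buffer memory; qinfo.reg.* is left
	 * out here but holds the queue registers in the real driver.
	 */

	return cpfl_ctlq_add(hw, &qinfo, cfg_txq);
}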

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1305 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..4a925bc338
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EINVAL;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EINVAL;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init. The CP allocates one big chunk of DMA
+		 * region whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EINVAL;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues, only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		desc->cookie_high =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
+		desc->cookie_low =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
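
/*
 * A minimal usage sketch (not from this patch), assuming the cpfl_controlq.h
 * declarations are in scope: queueing one indirect message on a config TX
 * control queue.  The opcode is a placeholder, and the caller is assumed to
 * own both msg and the DMA payload until cpfl_ctlq_clean_sq() hands the
 * message back.
 */
static int
example_send_one(struct idpf_hw *hw, struct idpf_ctlq_info *txq,
		 struct idpf_ctlq_msg *msg, struct idpf_dma_mem *payload,
		 uint16_t opcode)
{
	memset(msg, 0, sizeof(*msg));
	msg->opcode = opcode;				/* placeholder opcode */
	msg->data_len = (uint16_t)payload->size;	/* indirect payload length */
	msg->ctx.indirect.payload = payload;		/* DMAable buffer from the caller */

	return cpfl_ctlq_send(hw, txq, 1, msg);
}
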
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
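
/*
 * A minimal cleanup sketch (not from this patch), assuming an arbitrary
 * batch of 16: the returned message pointers and their DMA payloads belong
 * to the caller again once cpfl_ctlq_clean_sq() returns.
 */
static uint16_t
example_clean_txq(struct idpf_ctlq_info *txq)
{
	struct idpf_ctlq_msg *done[16];
	uint16_t num = 16;
	uint16_t i, failed = 0;

	if (cpfl_ctlq_clean_sq(txq, &num, done))
		return 0;

	for (i = 0; i < num; i++) {
		/* a non-zero status means that message failed to send */
		if (done[i]->status != 0)
			failed++;
	}

	return failed;
}
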
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
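
/*
 * A minimal polling sketch (not from this patch), assuming a batch of four
 * messages whose DMA buffers are handed straight back, as required by the
 * cpfl_ctlq_post_rx_buffs() description above.
 */
static void
example_poll_rxq(struct idpf_hw *hw, struct idpf_ctlq_info *rxq)
{
	struct idpf_ctlq_msg msgs[4];
	struct idpf_dma_mem *bufs[4];
	uint16_t nb_msg = 4, nb_buf = 0;
	uint16_t i;
	int ret;

	ret = cpfl_ctlq_recv(rxq, &nb_msg, msgs);
	if (ret != 0 && ret != -EBADMSG)
		return;	/* e.g. -ENOMSG: nothing was pending */

	for (i = 0; i < nb_msg; i++) {
		/* ... process msgs[i] here ... */
		if (msgs[i].data_len > 0)
			bufs[nb_buf++] = msgs[i].ctx.indirect.payload;
	}

	/* return the consumed DMA buffers so the queue can repost them */
	cpfl_ctlq_post_rx_buffs(hw, rxq, &nb_buf, bufs);
}
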
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..7d277a0e8e 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+	       IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 5/9] net/cpfl: add FXP low level implementation
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (3 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
                                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add low-level helper functions for the CPFL PMD to create / delete
rules on the IPU's Flexible Packet Processor (FXP).

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
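As a rough illustration (not taken from the driver; the precedence, VSI and
queue values below are placeholders), two of the action helpers added in this
patch could be composed like this:

    #include "cpfl_actions.h"

    static int
    example_build_actions(union cpfl_action_set acts[2])
    {
            /* forward to VSI 3 via the LAN protocol engine, SET_VSI slot 0 */
            acts[0] = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, 3);
            /* steer to queue 128, keeping implicit VSI handling enabled */
            acts[1] = cpfl_act_set_hash_queue(1, CPFL_PE_LAN, 128, false);

            /* the factories degrade to a NOP on out-of-range arguments */
            if (cpfl_is_nop_action(&acts[0]) || cpfl_is_nop_action(&acts[1]))
                    return -1;
            return 0;
    }
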
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 127 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1292 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base action
+ * once the HAS has finalized it.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
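
/*
 * A minimal sketch (not from this patch): an explicit ACT_COMMIT paired with
 * DROP, so the drop is committed under CPFL_ACT_COMMIT_ALL as described
 * above.  Precedence values are placeholders.
 */
static inline void
example_commit_and_drop(union cpfl_action_set acts[2])
{
	acts[0] = cpfl_act_set_commit_mode(1, CPFL_ACT_COMMIT_ALL);
	acts[1] = cpfl_act_drop(2);
}
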
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint to whether
+ * or not an accompanied MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of 2 chained
+ * action sets.  The chained action set is the first.  The base/parent action
+ * set is the second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..3d259d3da8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
+	rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+ /* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+	'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (4 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
                                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add a new module that implements FXP rule creation / destroying.
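
The control path packs each rule into a control queue descriptor plus an
indirect data blob, posts it on the tx config queue, then polls the rx
config queue for the completion. The sketch below illustrates how a caller
might drive this module for a single SEM rule; the profile id, key length
and key bytes are placeholders, not values defined by this patch:

    /* Sketch only: assumes the cpfl internal headers are available and
     * that "itf" already owns a pair of configured control queues and
     * pre-allocated per-rule DMA buffers (itf->dma).
     */
    #include <string.h>
    #include "cpfl_fxp_rule.h"

    static int
    example_add_sem_rule(struct cpfl_itf *itf,
                         struct idpf_ctlq_info *tx_cq,
                         struct idpf_ctlq_info *rx_cq)
    {
        struct cpfl_rule_info rinfo = {0};

        rinfo.type = CPFL_RULE_TYPE_SEM;
        rinfo.sem.prof_id = 1;          /* placeholder profile id */
        rinfo.sem.sub_prof_id = 0;
        rinfo.sem.key_byte_len = 16;    /* placeholder key length */
        memset(rinfo.sem.key, 0xab, rinfo.sem.key_byte_len);
        rinfo.cookie = 0x1000;          /* placeholder cookie */
        rinfo.resp_req = 2;

        /* pack the rule, post it on tx_cq, then poll rx_cq for completion */
        return cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
    }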

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 263 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 ++++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 369 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..ea65e20507
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		/* TODO - process rx controlq message */
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (5 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt the FXP implementation to the cpfl flow engine interface.
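
Once registered, the engine is exercised through the standard rte_flow API.
The sketch below shows, at application level, the kind of rule the FXP
engine parses (forward ETH/IPv4/TCP traffic to another vport); the port ids
and the group number are placeholders chosen for illustration:

    #include <rte_flow.h>

    /* Sketch only: port 0 is assumed to be a cpfl vport, port 1 the target
     * vport, and group 1 is assumed to map to the FXP module in the json
     * parser configuration.
     */
    static struct rte_flow *
    example_fxp_flow(struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .group = 1, .priority = 1, .ingress = 1 };
        struct rte_flow_action_ethdev to_port = { .port_id = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_TCP },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, .conf = &to_port },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, err);
    }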

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                |  18 +-
 doc/guides/rel_notes/release_23_11.rst  |   1 +
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 582 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 5 files changed, 627 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index e17347d15c..ae5487f2f6 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -197,8 +197,22 @@ low level hardware resources.
 
     * For Ubuntu, it can be installed using `apt install libjansson-dev`
 
-- run testpmd with the json file
+- run testpmd with the json file, create two vports
 
    .. code-block:: console
 
-   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from the I/O port to a local (CPF's) vport. The flow should be created on
+   vport X. Group M should match the FXP module. Action port_representor Y means forwarding the packet to local vport Y::
+
+   .. code-block:: console
+
+   flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end
+
+#. Send a matching packet, and it should be displayed on the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 8536ce88f4..16cdd674d3 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -85,6 +85,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 * **Updated Intel iavf driver.**
   * Added support for iavf auto-reset.
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data not be allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..4c7b4deb7a
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information from the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 8/9] net/cpfl: support flow ops on representor
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (6 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  2023-09-08 16:05                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow ops support for representor, so representor can
create, destroy, validate and flush rules.
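
With these ops hooked up, the same public rte_flow entry points can be
called on a representor port. A minimal sketch follows; the representor
port id, attributes, pattern and actions are assumed to come from the
caller:

    #include <rte_flow.h>

    /* Sketch only: repr_port_id is assumed to be a cpfl port representor
     * created through the representor devargs.
     */
    static int
    example_repr_flow_ops(uint16_t repr_port_id,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item *pattern,
                          const struct rte_flow_action *actions,
                          struct rte_flow_error *err)
    {
        int ret;

        /* validate goes through the same parse path as create */
        ret = rte_flow_validate(repr_port_id, attr, pattern, actions, err);
        if (ret)
            return ret;

        /* remove every rule owned by this representor */
        return rte_flow_flush(repr_port_id, err);
    }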

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 74 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 4c7b4deb7a..7a3376f9f6 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -72,6 +72,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -82,6 +83,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -121,6 +126,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -134,6 +140,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -413,6 +423,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -429,6 +497,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
 	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 9/9] net/cpfl: support represented port action
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                   ` (7 preceding siblings ...)
  2023-09-08 16:05                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
@ 2023-09-08 16:05                 ` Zhang, Yuying
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-08 16:05 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Support the RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT action for forwarding
packets to APF/CPF/VF representors.
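
For example, the action could be expressed roughly as follows (illustrative
sketch only; target_port_id is a placeholder for the target representor's
ethdev port id):

	struct rte_flow_action_ethdev port = { .port_id = target_port_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};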

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 7a3376f9f6..ddede2f553 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -266,6 +266,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -276,6 +277,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -294,12 +296,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs
  2023-09-06  9:33     ` [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-09-11  0:48       ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2023-09-11  0:48 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Xing, Beilei; +Cc: Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs
> 
> Add devargs "flow_parser" for rte_flow json parser.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
>  doc/guides/nics/cpfl.rst       | 32 ++++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_ethdev.c | 38
> +++++++++++++++++++++++++++++++++-
>  drivers/net/cpfl/cpfl_ethdev.h |  3 +++
>  drivers/net/cpfl/meson.build   |  6 ++++++
>  4 files changed, 78 insertions(+), 1 deletion(-)
> 
> diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
> index c20334230b..7032dd1a1a 100644
> --- a/doc/guides/nics/cpfl.rst
> +++ b/doc/guides/nics/cpfl.rst
> @@ -128,12 +128,24 @@ Runtime Configuration
> 
>      -a BDF,representor=vf[0-3],representor=c1pf1
> 
> +- ``flow_parser`` (default ``not enabled``)
> +
> +  The PMD supports using a JSON file to parse rte_flow tokens into low level
> hardware
> +  resources defined in a DDP package file.
> +
> +  The user can specify the path of json file, for example::
> +
> +    -a ca:00.0,flow_parser="refpkg.json"
> +
> +  Then the PMD will load json file for device ``ca:00.0``.
> +  The parameter is optional.
> 
>  Driver compilation and testing
>  ------------------------------
> 
>  Refer to the document :doc:`build_and_test` for details.
> 
> +Rte flow need to install json-c library.
> 
>  Features
>  --------
> @@ -164,3 +176,23 @@ Hairpin queue
>  E2100 Series can loopback packets from RX port to TX port.
>  This feature is called port-to-port or hairpin.
>  Currently, the PMD only supports single port hairpin.
> +
> +Rte_flow
> +~~~~~~~~~~~~~
> +
> +Rte_flow uses a json file to direct CPF PMD to parse rte_flow tokens into
> +low level hardware resources defined in a DDP package file.
> +
> +#. install json-c library::
> +
> +   .. code-block:: console
> +
> +   git clone https://github.com/json-c/json-c.git
> +   cd json-c
> +   git checkout 777dd06be83ef7fac71c2218b565557cd068a714
> +
json-c is a dependency that can be installed with a package management tool such as apt; can you add a reference for that?
If it has to be installed from source code, a version number would be better than a commit id.
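For example "sudo apt install libjson-c-dev" on Debian/Ubuntu or "dnf install json-c-devel" on Fedora (package names from memory, they may vary per distribution/release).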

^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v3 2/9] net/cpfl: add flow json parser
  2023-09-06  9:34     ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
  2023-09-08  6:26       ` Liu, Mingxia
@ 2023-09-11  6:24       ` Wu, Jingjing
  1 sibling, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2023-09-11  6:24 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Xing, Beilei; +Cc: Liu, Mingxia

> +static int
> +cpfl_json_object_to_int(json_object *object, const char *name, int *value)
> +{
> +	json_object *subobject;
> +
> +	if (!object) {
> +		PMD_DRV_LOG(ERR, "object doesn't exist.");
> +		return -EINVAL;
> +	}
> +	subobject = json_object_object_get(object, name);
> +	if (!subobject) {
> +		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
> +		return -EINVAL;
> +	}
> +	*value = json_object_get_int(subobject);
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t
> *value)
> +{
There looks to be no need to define a new function, as there is no difference from the cpfl_json_object_to_int() function besides the type of the output value.
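
A sketch of how the existing helper could be reused instead (illustrative only):

	int v;
	int ret = cpfl_json_object_to_int(object, name, &v);

	if (ret < 0)
		return ret;
	*value = (uint16_t)v; /* optionally range-check v first */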
[...]

> +
> +static int
> +cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
> +				     struct cpfl_flow_js_pr_key_proto *js_field)
> +{
> +	int len, i;
> +
> +	if (!cjson_field)
> +		return 0;
> +	len = json_object_array_length(cjson_field);
> +	js_field->fields_size = len;
> +	if (len == 0)
Move the if check above, before setting js_field->fields_size?
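
i.e. roughly:

	len = json_object_array_length(cjson_field);
	if (len == 0)
		return 0;
	js_field->fields_size = len;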

> +		return 0;
> +	js_field->fields =
> +	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) *
> len, 0);
> +	if (!js_field->fields) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	for (i = 0; i < len; i++) {
> +		json_object *object;
> +		const char *name, *mask;
> +
> +		object = json_object_array_get_idx(cjson_field, i);
> +		name = cpfl_json_object_to_string(object, "name");
> +		if (!name) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
> +			goto err;
> +		}
> +		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
> +			PMD_DRV_LOG(ERR, "The 'name' is too long.");
> +			goto err;
> +		}
> +		memcpy(js_field->fields[i].name, name, strlen(name));
Is js_field->fields[i].name zeroed? If not, copying only strlen(name) bytes does not guarantee a correctly terminated string.
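
A minimal alternative (sketch only; assumes the 'name' member of struct cpfl_flow_js_pr_key_proto_field is a fixed-size char array):

	rte_strlcpy(js_field->fields[i].name, name, sizeof(js_field->fields[i].name));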

> +		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
> +			mask = cpfl_json_object_to_string(object, "mask");
> +			if (!mask) {
> +				PMD_DRV_LOG(ERR, "Can not parse string
> 'mask'.");
> +				goto err;
> +			}
> +			memcpy(js_field->fields[i].mask, mask, strlen(mask));
The same as above.

> +		} else {
> +			uint32_t mask_32b;
> +			int ret;
> +
> +			ret = cpfl_json_object_to_uint32(object, "mask",
> &mask_32b);
> +			if (ret < 0) {
> +				PMD_DRV_LOG(ERR, "Can not parse uint32
> 'mask'.");
> +				goto err;
> +			}
> +			js_field->fields[i].mask_32b = mask_32b;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_field->fields);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct
> cpfl_flow_js_pr *js_pr)
> +{
> +	int len, i, ret;
> +
> +	len = json_object_array_length(cjson_pr_key_proto);
> +	js_pr->key.proto_size = len;
> +	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_pr_key_proto) * len, 0);
> +	if (!js_pr->key.protocols) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < len; i++) {
> +		json_object *object, *cjson_pr_key_proto_fields;
> +		const char *type;
> +		enum rte_flow_item_type item_type;
> +
> +		object = json_object_array_get_idx(cjson_pr_key_proto, i);
> +		/* pr->key->proto->type */
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			goto err;
> +		}
> +		item_type = cpfl_get_item_type_by_str(type);
> +		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
> +			goto err;
> +		js_pr->key.protocols[i].type = item_type;
> +		/* pr->key->proto->fields */
> +		cjson_pr_key_proto_fields = json_object_object_get(object,
> "fields");
> +		ret =
> cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
> +							   &js_pr-
> >key.protocols[i]);
> +		if (ret < 0)
> +			goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_pr->key.protocols);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct
> cpfl_flow_js_fv *js_fv)
> +{
> +	uint16_t layer = 0, offset = 0, mask = 0;
> +	const char *header;
> +	enum rte_flow_item_type type;
> +	int ret;
> +
> +	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
> +		return -EINVAL;
> +	}
> +
> +	header = cpfl_json_object_to_string(cjson_value, "header");
> +	if (!header) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
> +		return -EINVAL;
> +	}
> +	js_fv->proto.layer = layer;
> +	js_fv->proto.offset = offset;
> +	js_fv->proto.mask = mask;
> +	type = cpfl_get_item_type_by_str(header);
You can put this right after getting 'header'; if there is no valid item type, return earlier.

> +	if (type == RTE_FLOW_ITEM_TYPE_VOID)
> +		return -EINVAL;
> +	js_fv->proto.header = type;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv_metadata(json_object *cjson_value, struct
> cpfl_flow_js_fv *js_fv)
> +{
> +	int ret;
> +
> +	ret = cpfl_json_object_to_uint16(cjson_value, "type", &js_fv-
> >meta.type);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
Is the log correct?

> +		return ret;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &js_fv-
> >meta.offset);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
Same as above.
> +		return ret;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &js_fv-
> >meta.mask);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct
> cpfl_flow_js_pr_action *js_act)
> +{
> +	int len, i;
> +
> +	len = json_object_array_length(cjson_fv);
Better to check the len here?

> +	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len,
> 0);
> +	if (!js_act->sem.fv) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_act->sem.fv_size = len;
> +	for (i = 0; i < len; i++) {
> +		struct cpfl_flow_js_fv *js_fv;
> +		json_object *object, *cjson_value;
> +		uint16_t offset = 0;
> +		const char *type;
> +		int ret;
> +
> +		object = json_object_array_get_idx(cjson_fv, i);
> +		js_fv = &js_act->sem.fv[i];
> +		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +			goto err;
> +		}
> +		js_fv->offset = offset;
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			goto err;
> +		}
> +		cjson_value = json_object_object_get(object, "value");
> +		if (strcmp(type, "immediate") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
> +			js_fv->immediate = json_object_get_int(cjson_value);
> +		} else if (strcmp(type, "metadata") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_METADATA;
> +			cpfl_flow_js_pattern_act_fv_metadata(cjson_value,
> js_fv);
> +		} else if (strcmp(type, "protocol") == 0) {
> +			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
> +			cpfl_flow_js_pattern_act_fv_proto(cjson_value,
> js_fv);
> +		} else {
> +			PMD_DRV_LOG(ERR, "Not support this type: %s.",
> type);
> +			goto err;
> +		}
> +	}
> +
> +	return 0;
> +
> +err:
> +	rte_free(js_act->sem.fv);
> +	return -EINVAL;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct
> cpfl_flow_js_pr_action *js_act)
> +{
> +	const char *type;
> +	int ret;
> +
> +	/* pr->actions->type */
> +	type = cpfl_json_object_to_string(cjson_per_act, "type");
> +	if (!type) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +		return -EINVAL;
> +	}
> +	/* pr->actions->data */
> +	if (strcmp(type, "sem") == 0) {
> +		json_object *cjson_fv, *cjson_pr_action_sem;
> +
> +		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
> +		cjson_pr_action_sem =
> json_object_object_get(cjson_per_act, "data");
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "profile",
> +						 &js_act->sem.prof);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
> +			return -EINVAL;
> +		}
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "subprofile",
> +						 &js_act->sem.subprof);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
> +			return -EINVAL;
> +		}
> +		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem,
> "keysize",
> +						 &js_act->sem.keysize);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
> +			return -EINVAL;
> +		}
> +		cjson_fv = json_object_object_get(cjson_pr_action_sem,
> "fieldvectors");
> +		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
> +		if (ret < 0)
> +			return ret;
> +	} else {
> +		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr
> *js_pr)
> +{
> +	int i, len, ret;
> +
> +	len = json_object_array_length(cjson_pr_act);
Check len?
The same comment applies to the following code that gets a length and then allocates memory.
[...]

> +int
> +cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
> +{
> +	int i, j;
> +
> +	if (!parser)
> +		return 0;
> +
> +	for (i = 0; i < parser->pr_size; i++) {
> +		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
> +
> +		if (!pattern)
> +			return -EINVAL;
I think the destroy should continue; if so, use continue rather than just returning.

> +		for (j = 0; j < pattern->key.proto_size; j++)
> +			rte_free(pattern->key.protocols[j].fields);
> +		rte_free(pattern->key.protocols);
> +		rte_free(pattern->key.attributes);
> +
> +		for (j = 0; j < pattern->actions_size; j++) {
> +			struct cpfl_flow_js_pr_action *pr_act;
> +
> +			pr_act = &pattern->actions[j];
> +			cpfl_parser_free_pr_action(pr_act);
> +		}
> +		rte_free(pattern->actions);
> +	}
> +	rte_free(parser->patterns);
> +	for (i = 0; i < parser->mr_size; i++) {
> +		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
> +
> +		if (!mr)
> +			return -EINVAL;
I think the destroy should continue; if so, use continue rather than just returning.
[...]

> +static int
> +cpfl_str2mac(const char *mask, uint8_t *addr_bytes)
How about using rte_ether_unformat_addr() instead of defining a new one?
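
For example (sketch only; assumes the mask string uses a standard MAC address format):

	struct rte_ether_addr a;

	if (rte_ether_unformat_addr(mask, &a) != 0)
		return -EINVAL;
	memcpy(addr_bytes, a.addr_bytes, RTE_ETHER_ADDR_LEN);
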
[...]

> +
> +/* output: struct cpfl_flow_mr_key_action *mr_key_action */
> +/* check and parse */
> +static int
> +cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int
> size,
> +			 const struct rte_flow_action *actions,
> +			 struct cpfl_flow_mr_key_action *mr_key_action)
> +{
> +	int actions_length, i;
> +	int j = 0;
> +	int ret;
> +
> +	actions_length = cpfl_get_actions_length(actions);
> +	if (size > actions_length - 1)
> +		return -EINVAL;
> +	for (i = 0; i < size; i++) {
> +		enum rte_flow_action_type type;
> +		struct cpfl_flow_js_mr_key_action *key_act;
> +
> +		key_act = &key_acts[i];
> +		/* mr->key->actions->type */
> +		type = key_act->type;
> +		/* mr->key->actions->data */
> +		/* match: <type> action matches
> RTE_FLOW_ACTION_TYPE_<type> */
> +		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			int proto_size, k;
> +			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
> +
> +			while (j < actions_length &&
> +			       actions[j].type !=
> RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +				j++;
> +			}
> +			if (j >= actions_length)
> +				return -EINVAL;
> +			mr_key_action[i].type =
> RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
> +			mr_key_action[i].encap.action = &actions[j];
> +			encap = &mr_key_action[i].encap;
> +
> +			proto_size = key_act->encap.proto_size;
> +			encap->proto_size = proto_size;
> +			for (k = 0; k < proto_size; k++) {
> +				enum rte_flow_item_type proto;
> +
> +				proto = key_act->encap.protocols[k];
> +				encap->protocols[k] = proto;
> +			}
> +			ret = cpfl_check_actions_vxlan_encap(encap,
> &actions[j]);
> +			if (ret < 0)
> +				return -EINVAL;
> +
> +			j++;
> +		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
> +			while (j < actions_length &&
> +			       actions[j].type !=
> RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
> +				j++;
> +			}
> +			if (j >= actions_length)
> +				return -EINVAL;
> +
> +			mr_key_action[i].type =
> RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
> +			j++;
> +		} else {
> +			PMD_DRV_LOG(ERR, "Not support this type: %d.",
> type);
> +			return -EPERM;
> +		}
Shouldn't j be reset to 0 after each loop iteration?

> +	}
> +
> +	return 0;
> +}
> +
> +/* output: uint8_t *buffer, uint16_t *byte_len */
> +static int
> +cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
> +		  struct cpfl_flow_mr_key_action *mr_key_action,
> +		  uint8_t *buffer, uint16_t *byte_len)
> +{
> +	int i;
> +	int start = 0;
> +
> +	for (i = 0; i < layout_size; i++) {
> +		int index, size, offset;
> +		const char *hint;
> +		const uint8_t *addr;
> +		struct cpfl_flow_mr_key_action *temp;
> +		struct cpfl_flow_js_mr_layout *layout;
> +
> +		layout = &layouts[i];
> +		/* index links to the element of the actions array. */
> +		index = layout->index;
> +		size = layout->size;
> +		offset = layout->offset;
> +		if (index == -1) {
> +			hint = "dummpy";
> +			start += size;
> +			continue;
> +		}
> +		hint = layout->hint;
> +		addr = NULL;
Why set it to NULL here?
[...]

> +void
> +cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset,
> uint16_t data)
> +{
> +	rte_memcpy(&meta->chunks[type].data[offset],
> +		   &data,
> +		   sizeof(uint16_t));
> +}
> +
> +void
> +cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset,
> uint32_t data)
> +{
> +	rte_memcpy(&meta->chunks[type].data[offset],
> +		   &data,
> +		   sizeof(uint32_t));
> +}
> +
> +uint16_t
> +cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
> +{
> +	return *((uint16_t *)(&meta->chunks[type].data[offset]));
> +}
> +

Those functions seem short enough; how about defining them as inline functions or macros?
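
E.g. one of them as a static inline helper in the header (sketch only):

	static inline void
	cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
	{
		rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
	}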

> +bool
> +cpfl_metadata_write_port_id(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 0;
> +	const int offset = 5;
> +
> +	dev_id = cpfl_get_port_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
> +		return false;
> +	}
> +	dev_id = dev_id << 3;
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
> +	return true;
> +}
> +
> +bool
> +cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 6;
> +	const int offset = 2;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	dev_id = dev_id << 1;
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
> +	return true;
> +}
> +
> +bool
> +cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 6;
> +	const int offset = 0;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
> +	return true;
> +}
> +
> +void
> +cpfl_metadata_init(struct cpfl_metadata *meta)
> +{
> +	int i;
> +
> +	for (i = 0; i < CPFL_META_LENGTH; i++)
> +		meta->chunks[i].type = i;
> +}
> +
> +bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
> +{
> +	uint32_t dev_id;
> +	const int type = 0;
> +	const int offset = 24;
> +
> +	dev_id = cpfl_get_vsi_id(itf);
> +	if (dev_id == CPFL_INVALID_HW_ID) {
> +		PMD_DRV_LOG(ERR, "fail to get hw ID");
> +		return false;
> +	}
> +	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
> +
> +	return true;
> +}

In general, this patch is too large; better to split it up for review.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v3 4/9] net/cpfl: setup ctrl path
  2023-09-06  9:34     ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
@ 2023-09-11  6:30       ` Liu, Mingxia
  2023-09-11  6:36       ` Wu, Jingjing
  1 sibling, 0 replies; 128+ messages in thread
From: Liu, Mingxia @ 2023-09-11  6:30 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing,
	Xing, Beilei



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v3 4/9] net/cpfl: setup ctrl path
> 
> Setup the control vport and control queue for flow offloading.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c | 267
> +++++++++++++++++++++++++++++++++  drivers/net/cpfl/cpfl_ethdev.h |
> 14 ++  drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
>  3 files changed, 425 insertions(+)
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 3c4a6a4724..22f3e72894 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct
> cpfl_adapter_ext *adapter, uint8_t *msg, uint
>  		return;
>  	}
> 
> +	/* ignore if it is ctrl vport */
> +	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
> +		return;
> +
>  	vport = cpfl_find_vport(adapter, vc_event->vport_id);
>  	if (!vport) {
>  		PMD_DRV_LOG(ERR, "Can't find vport."); @@ -1852,6
> +1856,260 @@ cpfl_dev_alarm_handler(void *param)
>  	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler,
> adapter);  }
> 
> +static int
> +cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter) {
> +	int i, ret;
> +
> +	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i,
> false, false);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to disable Tx config
> queue.");
> +			return ret;
> +		}
> +	}
> +
> +	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i,
> true, false);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to disable Rx config
> queue.");
> +			return ret;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter) {
> +	int i, ret;
> +
> +	ret = cpfl_config_ctlq_tx(adapter);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
> +		return ret;
> +	}
> +
> +	ret = cpfl_config_ctlq_rx(adapter);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
> +		return ret;
> +	}
> +
> +	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i,
> false, true);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to enable Tx config
> queue.");
> +			return ret;
> +		}
> +	}
> +
> +	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
> +		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i,
> true, true);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Fail to enable Rx config
> queue.");
> +			return ret;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter) {
> +	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
> +	struct cpfl_ctlq_create_info *create_cfgq_info;
> +	int i;
> +
> +	create_cfgq_info = adapter->cfgq_info;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
[Liu, Mingxia] Sometimes adapter->ctlqp[i] may be NULL, and then an error will be reported in cpfl_vport_ctlq_remove(), right?
For example, when this function is called by cpfl_add_cfgqs(). So it would be better to check whether adapter->ctlqp[i] == NULL first.
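
E.g. (sketch):

	if (adapter->ctlqp[i] != NULL) {
		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
		adapter->ctlqp[i] = NULL;
	}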

> +		if (create_cfgq_info[i].ring_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].ring_mem);
> +		if (create_cfgq_info[i].buf_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].buf_mem);
> +	}
> +}
> +
> +static int
> +cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter) {
> +	struct idpf_ctlq_info *cfg_cq;
> +	int ret = 0;
> +	int i = 0;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter-
> >base.hw),
> +					  &adapter->cfgq_info[i],
> +					  &cfg_cq);
> +		if (ret || !cfg_cq) {
[Liu, Mingxia] Better to set cfg_cq to NULL before each loop iteration?

> +			PMD_DRV_LOG(ERR, "ctlq add failed for queue
> id: %d",
> +				    adapter->cfgq_info[i].id);
> +			cpfl_remove_cfgqs(adapter);
> +			return ret;
> +		}
> +		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
> +			    adapter->cfgq_info[i].id);
> +		adapter->ctlqp[i] = cfg_cq;
> +	}
> +
> +	return ret;
> +}
> +
> +#define CPFL_CFGQ_RING_LEN		512
> +#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
> +#define CPFL_CFGQ_BUFFER_SIZE		256
> +#define CPFL_CFGQ_RING_SIZE		512
> +
> +static int
> +cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter) {
> +	struct cpfl_ctlq_create_info *create_cfgq_info;
> +	struct cpfl_vport *vport;
> +	int i, err;
> +	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct
> idpf_ctlq_desc);
> +	uint32_t buf_size = CPFL_CFGQ_RING_SIZE *
> CPFL_CFGQ_BUFFER_SIZE;
> +
> +	vport = &adapter->ctrl_vport;
> +	create_cfgq_info = adapter->cfgq_info;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		if (i % 2 == 0) {
> +			/* Setup Tx config queue */
> +			create_cfgq_info[i].id = vport-
> >base.chunks_info.tx_start_qid + i / 2;
> +			create_cfgq_info[i].type =
> IDPF_CTLQ_TYPE_CONFIG_TX;
> +			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
> +			create_cfgq_info[i].buf_size =
> CPFL_CFGQ_BUFFER_SIZE;
> +			memset(&create_cfgq_info[i].reg, 0, sizeof(struct
> idpf_ctlq_reg));
> +			create_cfgq_info[i].reg.tail = vport-
> >base.chunks_info.tx_qtail_start +
> +				i / 2 * vport-
> >base.chunks_info.tx_qtail_spacing;
> +		} else {
> +			/* Setup Rx config queue */
> +			create_cfgq_info[i].id = vport-
> >base.chunks_info.rx_start_qid + i / 2;
> +			create_cfgq_info[i].type =
> IDPF_CTLQ_TYPE_CONFIG_RX;
> +			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
> +			create_cfgq_info[i].buf_size =
> CPFL_CFGQ_BUFFER_SIZE;
> +			memset(&create_cfgq_info[i].reg, 0, sizeof(struct
> idpf_ctlq_reg));
> +			create_cfgq_info[i].reg.tail = vport-
> >base.chunks_info.rx_qtail_start +
> +				i / 2 * vport-
> >base.chunks_info.rx_qtail_spacing;
> +			if (!idpf_alloc_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].buf_mem,
> +						buf_size)) {
> +				err = -ENOMEM;
> +				goto free_mem;
> +			}
> +		}
> +		if (!idpf_alloc_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].ring_mem,
> +					ring_size)) {
> +			err = -ENOMEM;
> +			goto free_mem;
> +		}
> +	}
> +	return 0;
> +free_mem:
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		if (create_cfgq_info[i].ring_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].ring_mem);
> +		if (create_cfgq_info[i].buf_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].buf_mem);
> +	}
> +	return err;
> +}
> +
> +static int
> +cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter) {
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_create_vport *vport_info =
> +		(struct virtchnl2_create_vport *)adapter-
> >ctrl_vport_recv_info;
> +	int i;
> +
> +	vport->itf.adapter = adapter;
> +	vport->base.adapter = &adapter->base;
> +	vport->base.vport_id = vport_info->vport_id;
> +
> +	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
> +		if (vport_info->chunks.chunks[i].type ==
> VIRTCHNL2_QUEUE_TYPE_TX) {
> +			vport->base.chunks_info.tx_start_qid =
> +				vport_info-
> >chunks.chunks[i].start_queue_id;
> +			vport->base.chunks_info.tx_qtail_start =
> +			vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->base.chunks_info.tx_qtail_spacing =
> +			vport_info->chunks.chunks[i].qtail_reg_spacing;
> +		} else if (vport_info->chunks.chunks[i].type ==
> VIRTCHNL2_QUEUE_TYPE_RX) {
> +			vport->base.chunks_info.rx_start_qid =
> +				vport_info-
> >chunks.chunks[i].start_queue_id;
> +			vport->base.chunks_info.rx_qtail_start =
> +			vport_info->chunks.chunks[i].qtail_reg_start;
> +			vport->base.chunks_info.rx_qtail_spacing =
> +			vport_info->chunks.chunks[i].qtail_reg_spacing;
> +		} else {
> +			PMD_INIT_LOG(ERR, "Unsupported chunk type");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter) {
> +	cpfl_remove_cfgqs(adapter);
> +	cpfl_stop_cfgqs(adapter);
> +	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
[Liu, Mingxia] Should this be in the reverse order of cpfl_ctrl_path_open(), i.e. cpfl_stop_cfgqs() -> cpfl_remove_cfgqs() -> idpf_vc_vport_destroy()?
> +}
> +
> +static int
> +cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter) {
> +	int ret;
> +
> +	ret = cpfl_vc_create_ctrl_vport(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to create control vport");
> +		return ret;
> +	}
> +
> +	ret = cpfl_init_ctrl_vport(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to init control vport");
> +		goto err_init_ctrl_vport;
> +	}
> +
> +	ret = cpfl_cfgq_setup(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to setup control queues");
> +		goto err_cfgq_setup;
> +	}
> +
> +	ret = cpfl_add_cfgqs(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to add control queues");
> +		goto err_add_cfgq;
> +	}
> +
> +	ret = cpfl_start_cfgqs(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to start control queues");
> +		goto err_start_cfgqs;
> +	}
> +
> +	return 0;
> +
> +err_start_cfgqs:
> +	cpfl_stop_cfgqs(adapter);
> +err_add_cfgq:
> +	cpfl_remove_cfgqs(adapter);
> +err_cfgq_setup:
> +err_init_ctrl_vport:
> +	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
> +
> +	return ret;
> +}
> +
>  static struct virtchnl2_get_capabilities req_caps = {
>  	.csum_caps =
>  	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
> @@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device
> *pci_dev, struct cpfl_adapter_ext *a
>  		goto err_vports_alloc;
>  	}
> 
> +	ret = cpfl_ctrl_path_open(adapter);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to setup control path");
> +		goto err_create_ctrl_vport;
> +	}
> +
>  	adapter->cur_vports = 0;
>  	adapter->cur_vport_nb = 0;
> 
> @@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device
> *pci_dev, struct cpfl_adapter_ext *a
> 
>  	return ret;
> 
> +err_create_ctrl_vport:
> +	rte_free(adapter->vports);
>  err_vports_alloc:
>  	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
>  	cpfl_repr_whitelist_uninit(adapter);
> @@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device
> *pci_dev)  static void  cpfl_adapter_ext_deinit(struct cpfl_adapter_ext
> *adapter)  {
> +	cpfl_ctrl_path_close(adapter);
>  	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
>  	cpfl_vport_map_uninit(adapter);
>  	idpf_adapter_deinit(&adapter->base);
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 2151605987..40bba8da00 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -22,6 +22,7 @@
>  #include "cpfl_logs.h"
>  #include "cpfl_cpchnl.h"
>  #include "cpfl_representor.h"
> +#include "cpfl_controlq.h"
> 
>  /* Currently, backend supports up to 8 vports */
>  #define CPFL_MAX_VPORT_NUM	8
> @@ -89,6 +90,10 @@
> 
>  #define CPFL_FLOW_FILE_LEN 100
> 
> +#define CPFL_RX_CFGQ_NUM	4
> +#define CPFL_TX_CFGQ_NUM	4
> +#define CPFL_CFGQ_NUM		8
> +
>  #define CPFL_INVALID_HW_ID	UINT16_MAX
>  #define CPFL_META_CHUNK_LENGTH	1024
>  #define CPFL_META_LENGTH	32
> @@ -204,11 +209,20 @@ struct cpfl_adapter_ext {
>  	rte_spinlock_t repr_lock;
>  	struct rte_hash *repr_whitelist_hash;
> 
> +	/* ctrl vport and ctrl queues. */
> +	struct cpfl_vport ctrl_vport;
> +	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
> +	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
> +	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
> +
>  	struct cpfl_metadata meta;
>  };
> 
>  TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
> 
> +int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter); int
> +cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter); int
> +cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
>  int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
>  			   struct cpfl_vport_id *vport_identity,
>  			   struct cpchnl2_vport_info *vport_info); diff --git
> a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c index
> a21a4a451f..932840a972 100644
> --- a/drivers/net/cpfl/cpfl_vchnl.c
> +++ b/drivers/net/cpfl/cpfl_vchnl.c
> @@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext
> *adapter,
> 
>  	return 0;
>  }
> +
> +int
> +cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter) {
> +	struct virtchnl2_create_vport vport_msg;
> +	struct idpf_cmd_info args;
> +	int err = -1;
> +
> +	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
> +	vport_msg.vport_type =
> rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
> +	vport_msg.txq_model =
> rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +	vport_msg.rxq_model =
> rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
> +	vport_msg.num_tx_complq = 0;
> +	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
> +	vport_msg.num_rx_bufq = 0;
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
> +	args.in_args = (uint8_t *)&vport_msg;
> +	args.in_args_size = sizeof(vport_msg);
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	if (err) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of
> VIRTCHNL2_OP_CREATE_VPORT");
> +		return err;
> +	}
> +
> +	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
> +		   IDPF_DFLT_MBX_BUF_SIZE);
> +	return err;
> +}
> +
> +int
> +cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter) {
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
> +	struct virtchnl2_rxq_info *rxq_info;
> +	struct idpf_cmd_info args;
> +	uint16_t num_qs;
> +	int size, err, i;
> +
> +	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> +		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
> +		err = -EINVAL;
> +		return err;
> +	}
> +
> +	num_qs = CPFL_RX_CFGQ_NUM;
> +	size = sizeof(*vc_rxqs) + (num_qs - 1) *
> +		sizeof(struct virtchnl2_rxq_info);
> +	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
> +	if (!vc_rxqs) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate
> virtchnl2_config_rx_queues");
> +		err = -ENOMEM;
> +		return err;
> +	}
> +	vc_rxqs->vport_id = vport->base.vport_id;
> +	vc_rxqs->num_qinfo = num_qs;
> +
> +	for (i = 0; i < num_qs; i++) {
> +		rxq_info = &vc_rxqs->qinfo[i];
> +		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]-
> >desc_ring.pa;
> +		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
> +		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
> +		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> +		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i +
> 1].buf_size;
> +		rxq_info->max_pkt_size = vport->base.max_pkt_len;
> +		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
> +		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> +		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
> +	}
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
> +	args.in_args = (uint8_t *)vc_rxqs;
> +	args.in_args_size = size;
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	rte_free(vc_rxqs);
> +	if (err)
> +		PMD_DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_CONFIG_RX_QUEUES");
> +
> +	return err;
> +}
> +
> +int
> +cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter) {
> +	struct cpfl_vport *vport = &adapter->ctrl_vport;
> +	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
> +	struct virtchnl2_txq_info *txq_info;
> +	struct idpf_cmd_info args;
> +	uint16_t num_qs;
> +	int size, err, i;
> +
> +	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> +		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
> +		err = -EINVAL;
> +		return err;
> +	}
> +
> +	num_qs = CPFL_TX_CFGQ_NUM;
> +	size = sizeof(*vc_txqs) + (num_qs - 1) *
> +		sizeof(struct virtchnl2_txq_info);
> +	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
> +	if (!vc_txqs) {
> +		PMD_DRV_LOG(ERR, "Failed to allocate
> virtchnl2_config_tx_queues");
> +		err = -ENOMEM;
> +		return err;
> +	}
> +	vc_txqs->vport_id = vport->base.vport_id;
> +	vc_txqs->num_qinfo = num_qs;
> +
> +	for (i = 0; i < num_qs; i++) {
> +		txq_info = &vc_txqs->qinfo[i];
> +		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]-
> >desc_ring.pa;
> +		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
> +		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
> +		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> +		txq_info->sched_mode =
> VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
> +		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
> +	}
> +
> +	memset(&args, 0, sizeof(args));
> +	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
> +	args.in_args = (uint8_t *)vc_txqs;
> +	args.in_args_size = size;
> +	args.out_buffer = adapter->base.mbx_resp;
> +	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> +	err = idpf_vc_cmd_execute(&adapter->base, &args);
> +	rte_free(vc_txqs);
> +	if (err)
> +		PMD_DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_CONFIG_TX_QUEUES");
> +
> +	return err;
> +}
> --
> 2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v3 4/9] net/cpfl: setup ctrl path
  2023-09-06  9:34     ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
  2023-09-11  6:30       ` Liu, Mingxia
@ 2023-09-11  6:36       ` Wu, Jingjing
  1 sibling, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2023-09-11  6:36 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Xing, Beilei; +Cc: Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v3 4/9] net/cpfl: setup ctrl path
> 
> Setup the control vport and control queue for flow offloading.

In general, "[PATCH 3/9] net/cpfl: add FXP low level implementation" also contains ctrl queue setup functions.
The patch set may need to be better organized.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* FW: [PATCH v3 6/9] net/cpfl: add fxp rule module
  2023-09-06  9:34     ` [PATCH v3 6/9] net/cpfl: add fxp rule module Wenjing Qiao
@ 2023-09-12  7:40       ` Liu, Mingxia
  0 siblings, 0 replies; 128+ messages in thread
From: Liu, Mingxia @ 2023-09-12  7:40 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying, dev, Zhang, Qi Z, Wu, Jingjing,
	Xing, Beilei



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>
> Subject: [PATCH v3 6/9] net/cpfl: add fxp rule module
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> Added low level fxp module for rule packing / creation / destroying.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_controlq.h |  24 ++
>  drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
>  drivers/net/cpfl/cpfl_ethdev.h   |   6 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 297 ++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++
>  drivers/net/cpfl/meson.build     |   1 +
>  7 files changed, 851 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
> index 476c78f235..ed76282b0c 100644
> --- a/drivers/net/cpfl/cpfl_controlq.c
> +++ b/drivers/net/cpfl/cpfl_controlq.c
> @@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct
> cpfl_ctlq_create_info *qinfo,
>  	return status;
>  }
> 
> +/**
> + * cpfl_ctlq_send - send command to Control Queue (CTQ)
> + * @hw: pointer to hw struct
> + * @cq: handle to control queue struct to send on
> + * @num_q_msg: number of messages to send on control queue
> + * @q_msg: pointer to array of queue messages to be sent
> + *
> + * The caller is expected to allocate DMAable buffers and pass them to
> +the
> + * send routine via the q_msg struct / control queue specific data struct.
> + * The control queue will hold a reference to each send message until
> + * the completion for that message has been cleaned.
> + */
> +int
> +cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) {
> +	struct idpf_ctlq_desc *desc;
> +	int num_desc_avail = 0;
> +	int status = 0;
> +	int i = 0;
> +
> +	if (!cq || !cq->ring_size)
> +		return -ENOBUFS;
> +
> +	idpf_acquire_lock(&cq->cq_lock);
> +
> +	/* Ensure there are enough descriptors to send all messages */
> +	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
> +	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
> +		status = -ENOSPC;
> +		goto sq_send_command_out;
> +	}
> +
> +	for (i = 0; i < num_q_msg; i++) {
> +		struct idpf_ctlq_msg *msg = &q_msg[i];
> +		uint64_t msg_cookie;
> +
> +		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
> +		desc->opcode = CPU_TO_LE16(msg->opcode);
> +		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
> +		msg_cookie = *(uint64_t *)&msg->cookie;
> +		desc->cookie_high =
> +			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
> +		desc->cookie_low =
> +			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
> +		desc->flags = CPU_TO_LE16((msg->host_id &
> IDPF_HOST_ID_MASK) <<
> +				IDPF_CTLQ_FLAG_HOST_ID_S);
> +		if (msg->data_len) {
> +			struct idpf_dma_mem *buff = msg-
> >ctx.indirect.payload;
> +
> +			desc->datalen |= CPU_TO_LE16(msg->data_len);
> +			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
> +			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
> +			/* Update the address values in the desc with the pa
> +			 * value for respective buffer
> +			 */
> +			desc->params.indirect.addr_high =
> +				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
> +			desc->params.indirect.addr_low =
> +				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
> +			idpf_memcpy(&desc->params, msg-
> >ctx.indirect.context,
> +				    IDPF_INDIRECT_CTX_SIZE,
> IDPF_NONDMA_TO_DMA);
> +		} else {
> +			idpf_memcpy(&desc->params, msg->ctx.direct,
> +				    IDPF_DIRECT_CTX_SIZE,
> IDPF_NONDMA_TO_DMA);
> +		}
> +
> +		/* Store buffer info */
> +		cq->bi.tx_msg[cq->next_to_use] = msg;
> +		(cq->next_to_use)++;
> +		if (cq->next_to_use == cq->ring_size)
> +			cq->next_to_use = 0;
> +	}
> +
> +	/* Force memory write to complete before letting hardware
> +	 * know that there are new descriptors to fetch.
> +	 */
> +	idpf_wmb();
> +	wr32(hw, cq->reg.tail, cq->next_to_use);
> +
> +sq_send_command_out:
> +	idpf_release_lock(&cq->cq_lock);
> +
> +	return status;
> +}
> +
> +/**
> + * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW
> +write
> + * back for the requested queue
> + * @cq: pointer to the specific Control queue
> + * @clean_count: (input|output) number of descriptors to clean as
> +input, and
> + * number of descriptors actually cleaned as output
> + * @msg_status: (output) pointer to msg pointer array to be populated;
> +needs
> + * to be allocated by caller
> + * @force: (input) clean descriptors which were not done yet. Use with
> +caution
> + * in kernel mode only
> + *
> + * Returns an array of message pointers associated with the cleaned
> + * descriptors. The pointers are to the original ctlq_msgs sent on the
> +cleaned
> + * descriptors.  The status will be returned for each; any messages
> +that failed
> + * to send will have a non-zero status. The caller is expected to free
> +original
> + * ctlq_msgs and free or reuse the DMA buffers.
> + */
> +static int
> +__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
> +		     struct idpf_ctlq_msg *msg_status[], bool force) {
> +	struct idpf_ctlq_desc *desc;
> +	uint16_t i = 0, num_to_clean;
> +	uint16_t ntc, desc_err;
> +	int ret = 0;
> +
> +	if (!cq || !cq->ring_size)
> +		return -ENOBUFS;
> +
> +	if (*clean_count == 0)
> +		return 0;
> +	if (*clean_count > cq->ring_size)
> +		return -EINVAL;
> +
> +	idpf_acquire_lock(&cq->cq_lock);
> +	ntc = cq->next_to_clean;
> +	num_to_clean = *clean_count;
> +
> +	for (i = 0; i < num_to_clean; i++) {
> +		/* Fetch next descriptor and check if marked as done */
> +		desc = IDPF_CTLQ_DESC(cq, ntc);
> +		if (!force && !(LE16_TO_CPU(desc->flags) &
> IDPF_CTLQ_FLAG_DD))
> +			break;
> +
> +		desc_err = LE16_TO_CPU(desc->ret_val);
> +		if (desc_err) {
> +			/* strip off FW internal code */
> +			desc_err &= 0xff;
> +		}
> +
> +		msg_status[i] = cq->bi.tx_msg[ntc];
> +		if (!msg_status[i])
> +			break;
> +		msg_status[i]->status = desc_err;
> +		cq->bi.tx_msg[ntc] = NULL;
> +		/* Zero out any stale data */
> +		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
> +		ntc++;
> +		if (ntc == cq->ring_size)
> +			ntc = 0;
> +	}
> +
> +	cq->next_to_clean = ntc;
> +	idpf_release_lock(&cq->cq_lock);
> +
> +	/* Return number of descriptors actually cleaned */
> +	*clean_count = i;
> +
> +	return ret;
> +}
> +
> +/**
> + * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for
> +the
> + * requested queue
> + * @cq: pointer to the specific Control queue
> + * @clean_count: (input|output) number of descriptors to clean as
> +input, and
> + * number of descriptors actually cleaned as output
> + * @msg_status: (output) pointer to msg pointer array to be populated;
> +needs
> + * to be allocated by caller
> + *
> + * Returns an array of message pointers associated with the cleaned
> + * descriptors. The pointers are to the original ctlq_msgs sent on the
> +cleaned
> + * descriptors.  The status will be returned for each; any messages
> +that failed
> + * to send will have a non-zero status. The caller is expected to free
> +original
> + * ctlq_msgs and free or reuse the DMA buffers.
> + */
> +int
> +cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
> +		   struct idpf_ctlq_msg *msg_status[]) {
> +	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false); }
> +
> +/**
> + * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
> + * @hw: pointer to hw struct
> + * @cq: pointer to control queue handle
> + * @buff_count: (input|output) input is number of buffers caller is
> +trying to
> + * return; output is number of buffers that were not posted
> + * @buffs: array of pointers to dma mem structs to be given to hardware
> + *
> + * Caller uses this function to return DMA buffers to the descriptor
> +ring after
> + * consuming them; buff_count will be the number of buffers.
> + *
> + * Note: this function needs to be called after a receive call even
> + * if there are no DMA buffers to be returned, i.e. buff_count = 0,
> + * buffs = NULL to support direct commands  */ int
> +cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +			uint16_t *buff_count, struct idpf_dma_mem **buffs) {
> +	struct idpf_ctlq_desc *desc;
> +	uint16_t ntp = cq->next_to_post;
> +	bool buffs_avail = false;
> +	uint16_t tbp = ntp + 1;
> +	int status = 0;
> +	int i = 0;
> +
> +	if (*buff_count > cq->ring_size)
> +		return -EINVAL;
> +
> +	if (*buff_count > 0)
> +		buffs_avail = true;
> +	idpf_acquire_lock(&cq->cq_lock);
> +	if (tbp >= cq->ring_size)
> +		tbp = 0;
> +
> +	if (tbp == cq->next_to_clean)
> +		/* Nothing to do */
> +		goto post_buffs_out;
> +
> +	/* Post buffers for as many as provided or up until the last one used */
> +	while (ntp != cq->next_to_clean) {
> +		desc = IDPF_CTLQ_DESC(cq, ntp);
> +		if (cq->bi.rx_buff[ntp])
> +			goto fill_desc;
> +		if (!buffs_avail) {
> +			/* If the caller hasn't given us any buffers or
> +			 * there are none left, search the ring itself
> +			 * for an available buffer to move to this
> +			 * entry starting at the next entry in the ring
> +			 */
> +			tbp = ntp + 1;
> +			/* Wrap ring if necessary */
> +			if (tbp >= cq->ring_size)
> +				tbp = 0;
> +
> +			while (tbp != cq->next_to_clean) {
> +				if (cq->bi.rx_buff[tbp]) {
> +					cq->bi.rx_buff[ntp] =
> +						cq->bi.rx_buff[tbp];
> +					cq->bi.rx_buff[tbp] = NULL;
> +
> +					/* Found a buffer, no need to
> +					 * search anymore
> +					 */
> +					break;
> +				}
> +
> +				/* Wrap ring if necessary */
> +				tbp++;
> +				if (tbp >= cq->ring_size)
> +					tbp = 0;
> +			}
> +
> +			if (tbp == cq->next_to_clean)
> +				goto post_buffs_out;
> +		} else {
> +			/* Give back pointer to DMA buffer */
> +			cq->bi.rx_buff[ntp] = buffs[i];
> +			i++;
> +
> +			if (i >= *buff_count)
> +				buffs_avail = false;
> +		}
> +
> +fill_desc:
> +		desc->flags =
> +			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
> +
> +		/* Post buffers to descriptor */
> +		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
> +		desc->params.indirect.addr_high =
> +			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
> +		desc->params.indirect.addr_low =
> +			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
> +
> +		ntp++;
> +		if (ntp == cq->ring_size)
> +			ntp = 0;
> +	}
> +
> +post_buffs_out:
> +	/* Only update tail if buffers were actually posted */
> +	if (cq->next_to_post != ntp) {
> +		if (ntp)
> +			/* Update next_to_post to ntp - 1 since current ntp
> +			 * will not have a buffer
> +			 */
> +			cq->next_to_post = ntp - 1;
> +		else
> +			/* Wrap to end of end ring since current ntp is 0 */
> +			cq->next_to_post = cq->ring_size - 1;
> +
> +		wr32(hw, cq->reg.tail, cq->next_to_post);
> +	}
> +
> +	idpf_release_lock(&cq->cq_lock);
> +	/* return the number of buffers that were not posted */
> +	*buff_count = *buff_count - i;
> +
> +	return status;
> +}
> +
> +/**
> + * cpfl_ctlq_recv - receive control queue message call back
> + * @cq: pointer to control queue handle to receive on
> + * @num_q_msg: (input|output) input number of messages that should be received;
> + * output number of messages actually received
> + * @q_msg: (output) array of received control queue messages on this q;
> + * needs to be pre-allocated by caller for as many messages as requested
> + *
> + * Called by interrupt handler or polling mechanism. Caller is expected
> + * to free buffers
> + */
> +int
> +cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
> +	       struct idpf_ctlq_msg *q_msg)
> +{
> +	uint16_t num_to_clean, ntc, ret_val, flags;
> +	struct idpf_ctlq_desc *desc;
> +	int ret_code = 0;
> +	uint16_t i = 0;
> +
> +	if (!cq || !cq->ring_size)
> +		return -ENOBUFS;
> +
> +	if (*num_q_msg == 0)
> +		return 0;
> +	else if (*num_q_msg > cq->ring_size)
> +		return -EINVAL;
> +
> +	/* take the lock before we start messing with the ring */
> +	idpf_acquire_lock(&cq->cq_lock);
> +	ntc = cq->next_to_clean;
> +	num_to_clean = *num_q_msg;
> +
> +	for (i = 0; i < num_to_clean; i++) {
> +		/* Fetch next descriptor and check if marked as done */
> +		desc = IDPF_CTLQ_DESC(cq, ntc);
> +		flags = LE16_TO_CPU(desc->flags);
> +		if (!(flags & IDPF_CTLQ_FLAG_DD))
> +			break;
> +
> +		ret_val = LE16_TO_CPU(desc->ret_val);
> +		q_msg[i].vmvf_type = (flags &
> +				     (IDPF_CTLQ_FLAG_FTYPE_VM |
> +				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
> +				      IDPF_CTLQ_FLAG_FTYPE_S;
> +
> +		if (flags & IDPF_CTLQ_FLAG_ERR)
> +			ret_code = -EBADMSG;
> +
> +		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
> +		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
> +		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
> +		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
> +		q_msg[i].status = ret_val;
> +
> +		if (desc->datalen) {
> +			idpf_memcpy(q_msg[i].ctx.indirect.context,
> +				    &desc->params.indirect,
> +				    IDPF_INDIRECT_CTX_SIZE,
> +				    IDPF_DMA_TO_NONDMA);
> +
> +			/* Assign pointer to dma buffer to ctlq_msg array
> +			 * to be given to upper layer
> +			 */
> +			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
> +
> +			/* Zero out pointer to DMA buffer info;
> +			 * will be repopulated by post buffers API
> +			 */
> +			cq->bi.rx_buff[ntc] = NULL;
> +		} else {
> +			idpf_memcpy(q_msg[i].ctx.direct,
> +				    desc->params.raw,
> +				    IDPF_DIRECT_CTX_SIZE,
> +				    IDPF_DMA_TO_NONDMA);
> +		}
> +
> +		/* Zero out stale data in descriptor */
> +		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
> +			    IDPF_DMA_MEM);
> +
> +		ntc++;
> +		if (ntc == cq->ring_size)
> +			ntc = 0;
> +	};
> +
> +	cq->next_to_clean = ntc;
> +	idpf_release_lock(&cq->cq_lock);
> +	*num_q_msg = i;
> +	if (*num_q_msg == 0)
> +		ret_code = -ENOMSG;
> +
> +	return ret_code;
> +}
> +
>  int
>  cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
>  		    struct idpf_ctlq_info **cq)
> @@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
>  {
>  	cpfl_ctlq_remove(hw, cq);
>  }
> +
> +int
> +cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
> +{
> +	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
> +}
> +
> +int
> +cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
> +		     struct idpf_ctlq_msg q_msg[])
> +{
> +	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
> +}
> +
> +int
> +cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
> +{
> +	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
> +}
> +
> +int
> +cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
> +			 struct idpf_ctlq_msg *msg_status[])
> +{
> +	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
> +}
> diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
> index 930d717f63..740ae6522c 100644
> --- a/drivers/net/cpfl/cpfl_controlq.h
> +++ b/drivers/net/cpfl/cpfl_controlq.h
> @@ -14,6 +14,13 @@
>  #define CPFL_DFLT_MBX_RING_LEN		512
>  #define CPFL_CFGQ_RING_LEN		512
> 
> +/* CRQ/CSQ specific error codes */
> +#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
> +#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
> +#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
> +#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
> +#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
> +
[Liu, Mingxia] How about replacing the raw numeric constants with the errno macros? For example:
+#define CPFL_ERR_CTLQ_ERROR             (-EBADMSG)
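
A minimal sketch of the full replacement, assuming the standard <errno.h> values
(which match the numeric codes quoted above):

	#include <errno.h>

	#define CPFL_ERR_CTLQ_ERROR	(-EBADMSG)	/* -74 */
	#define CPFL_ERR_CTLQ_TIMEOUT	(-ETIMEDOUT)	/* -110 */
	#define CPFL_ERR_CTLQ_FULL	(-ENOSPC)	/* -28 */
	#define CPFL_ERR_CTLQ_NO_WORK	(-ENOMSG)	/* -42 */
	#define CPFL_ERR_CTLQ_EMPTY	(-ENOBUFS)	/* -105 */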

>  /* Generic queue info structures */
>  /* MB, CONFIG and EVENT q do not have extended info */
>  struct cpfl_ctlq_create_info {
> @@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
>  int cpfl_ctlq_add(struct idpf_hw *hw,
>  		  struct cpfl_ctlq_create_info *qinfo,
>  		  struct idpf_ctlq_info **cq);
> +int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
> +int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
> +		       struct idpf_ctlq_msg *msg_status[]);
> +int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +			    u16 *buff_count, struct idpf_dma_mem **buffs);
> +int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
> +		   struct idpf_ctlq_msg *q_msg);
>  int cpfl_vport_ctlq_add(struct idpf_hw *hw,
>  			struct cpfl_ctlq_create_info *qinfo,
>  			struct idpf_ctlq_info **cq);
>  void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
> +int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
> +int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
> +			 struct idpf_ctlq_msg q_msg[]);
> +
> +int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +				  u16 *buff_count, struct idpf_dma_mem **buffs);
> +int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
> +			     struct idpf_ctlq_msg *msg_status[]);
>  #endif
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 618a6a0fe2..08a55f0352 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -16,6 +16,7 @@
>  #include <ethdev_private.h>
>  #include "cpfl_rxtx.h"
>  #include "cpfl_flow.h"
> +#include "cpfl_rules.h"
> 
>  #define CPFL_REPRESENTOR	"representor"
>  #define CPFL_TX_SINGLE_Q	"tx_single"
> @@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
>  	adapter->cur_vport_nb--;
>  	dev->data->dev_private = NULL;
>  	adapter->vports[vport->sw_idx] = NULL;
> +	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
>  	rte_free(cpfl_vport);
> 
>  	return 0;
> @@ -2462,6 +2464,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
>  	return 0;
>  }
> 
> +int
> +cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
> +			 int batch_size)
> +{
> +	int i;
> +
> +	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
> +		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < batch_size; i++) {
> +		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
> +		dma[i].pa = orig_dma->pa + size * (i + 1);
> +		dma[i].size = size;
> +		dma[i].zone = NULL;
> +	}
> +	return 0;
> +}
> +
>  static int
>  cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>  {
> @@ -2511,6 +2533,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
>  	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
>  			    &dev->data->mac_addrs[0]);
> 
> +	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
> +	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
> +	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
> +				       cpfl_vport->itf.dma,
> +				       sizeof(union cpfl_rule_cfg_pkt_record),
> +				       CPFL_FLOW_BATCH_SIZE);
> +	if (ret < 0)
> +		goto err_mac_addrs;
> +
>  	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
>  		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
>  		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index be625284a4..6b02573b4a 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -149,10 +149,14 @@ enum cpfl_itf_type {
> 
>  TAILQ_HEAD(cpfl_flow_list, rte_flow);
> 
> +#define CPFL_FLOW_BATCH_SIZE  490
>  struct cpfl_itf {
>  	enum cpfl_itf_type type;
>  	struct cpfl_adapter_ext *adapter;
>  	struct cpfl_flow_list flow_list;
> +	struct idpf_dma_mem flow_dma;
> +	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> +	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
>  	void *data;
>  };
> 
> @@ -238,6 +242,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
>  			   struct cpchnl2_vport_id *vport_id,
>  			   struct cpfl_vport_id *vi,
>  			   struct cpchnl2_get_vport_info_response *response);
> +int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
> +			     uint32_t size, int batch_size);
> 
>  #define CPFL_DEV_TO_PCI(eth_dev)		\
>  	RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..f87ccc9f77
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,297 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +#include "cpfl_ethdev.h"
> +
> +#include "cpfl_fxp_rule.h"
> +#include "cpfl_logs.h"
> +
> +#define CTLQ_SEND_RETRIES 100
> +#define CTLQ_RECEIVE_RETRIES 100
> +
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
> +		   struct idpf_ctlq_msg q_msg[])
> +{
> +	struct idpf_ctlq_msg **msg_ptr_list;
> +	u16 clean_count = 0;
> +	int num_cleaned = 0;
> +	int retries = 0;
> +	int ret = 0;
> +
> +	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
> +	if (!msg_ptr_list) {
> +		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
> +		goto send_err;
> +	}
> +
> +	while (retries <= CTLQ_SEND_RETRIES) {
> +		clean_count = num_q_msg - num_cleaned;
> +		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
> +					       &msg_ptr_list[num_cleaned]);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
> +			goto send_err;
> +		}
> +
> +		num_cleaned += clean_count;
> +		retries++;
> +		if (num_cleaned >= num_q_msg)
> +			break;
> +		rte_delay_us_sleep(10);
> +	}
> +
> +	if (retries > CTLQ_SEND_RETRIES) {
> +		PMD_INIT_LOG(ERR, "timed out while polling for completions");
> +		ret = -1;
> +		goto send_err;
> +	}
> +
> +send_err:
> +	if (msg_ptr_list)
> +		free(msg_ptr_list);
> +err:
> +	return ret;
> +}
> +
> +static int
> +cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
> +{
> +	u16 i;
> +	int ret = 0;
> +
> +	if (!num_q_msg || !q_msg)
> +		return -EINVAL;
> +
> +	for (i = 0; i < num_q_msg; i++) {
> +		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
> +			continue;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
> +			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
> +			return -EINVAL;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
> +			PMD_INIT_LOG(ERR, "The rule has already been deleted");
> +			return -EINVAL;
> +		} else {
> +			PMD_INIT_LOG(ERR, "Invalid rule");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return ret;
[Liu, Mingxia] The ret value is never changed; can it be removed so the function returns 0 directly?
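
For instance, a minimal sketch of the simplified function (the per-message
status checks stay exactly as above; only the unused local goes away):

	static int
	cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
	{
		u16 i;

		if (!num_q_msg || !q_msg)
			return -EINVAL;

		for (i = 0; i < num_q_msg; i++) {
			/* ... unchanged status/opcode checks, each error
			 * path still returns -EINVAL ...
			 */
		}

		return 0;	/* 'ret' dropped; the success path always returns 0 */
	}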
> +}
> +
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
> +		      struct idpf_ctlq_msg q_msg[])
> +{
> +	int retries = 0;
> +	struct idpf_dma_mem *dma;
> +	u16 i;
> +	uint16_t buff_cnt;
> +	int ret = 0, handle_rule = 0;
> +
> +	retries = 0;
> +	while (retries <= CTLQ_RECEIVE_RETRIES) {
> +		rte_delay_us_sleep(10);
> +		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
> +
> +		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
> +		    ret != CPFL_ERR_CTLQ_ERROR) {
> +			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
> +			retries++;
> +			continue;
> +		}
> +
> +		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
> +			retries++;
> +			continue;
> +		}
> +
> +		if (ret == CPFL_ERR_CTLQ_EMPTY)
> +			break;
> +
> +		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
> +		if (ret) {
> +			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
[Liu, Mingxia] The log level is WARNING, but the return value is passed back to the caller; how about using the ERROR log level?
> +			handle_rule = ret;
> +		}
> +
> +		for (i = 0; i < num_q_msg; i++) {
> +			if (q_msg[i].data_len > 0)
> +				dma = q_msg[i].ctx.indirect.payload;
> +			else
> +				dma = NULL;
> +
> +			buff_cnt = dma ? 1 : 0;
> +			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
> +			if (ret)
> +				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
[Liu, Mingxia] The log level is WARNING, but the return value is passed back to the caller; how about using the ERROR log level?
> +		}
> +		break;
> +	}
> +
> +	if (retries > CTLQ_RECEIVE_RETRIES) {
> +		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
> +		ret = -1;
> +	}
> +
> +	return ret + handle_rule;
[Liu, Mingxia] This looks a bit confusing; the calling function cpfl_rule_process() only checks whether the return value is < 0, so how about returning -1 if (ret < 0 || handle_rule < 0)?
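
A small sketch of the suggested convention, assuming cpfl_rule_process() only
cares about "negative means failure":

	if (retries > CTLQ_RECEIVE_RETRIES) {
		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
		ret = -1;
	}

	if (ret < 0 || handle_rule < 0)
		return -1;

	return 0;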
> +}
> +
> +static int
> +cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
> +		   struct idpf_ctlq_msg *msg)
> +{
> +	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
> +	union cpfl_rule_cfg_pkt_record *blob = NULL;
> +	struct cpfl_rule_cfg_data cfg = {0};
> +
> +	/* prepare rule blob */
> +	if (!dma->va) {
> +		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
> +		return -1;
> +	}
> +	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
> +	memset(blob, 0, sizeof(*blob));
> +	memset(&cfg, 0, sizeof(cfg));
> +
> +	/* fill info for both query and add/update */
> +	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
> +				   minfo->pin_mod_content,
> +				   minfo->mod_index,
> +				   &cfg.ext.mod_content);
> +
> +	/* only fill content for add/update */
> +	memcpy(blob->mod_blob, minfo->mod_content,
> +	       minfo->mod_content_byte_len);
> +
> +#define NO_HOST_NEEDED 0
> +	/* pack message */
> +	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
> +				       rinfo->cookie,
> +				       0, /* vsi_id not used for mod */
> +				       rinfo->port_num,
> +				       NO_HOST_NEEDED,
> +				       0, /* time_sel */
> +				       0, /* time_sel_val */
> +				       0, /* cache_wr_thru */
> +				       rinfo->resp_req,
> +				       (u16)sizeof(*blob),
> +				       (void *)dma,
> +				       &cfg.common);
> +	cpfl_prep_rule_desc(&cfg, msg);
> +	return 0;
> +}
> +
> +static int
> +cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
> +		       struct idpf_ctlq_msg *msg, bool add)
> +{
> +	union cpfl_rule_cfg_pkt_record *blob = NULL;
> +	enum cpfl_ctlq_rule_cfg_opc opc;
> +	struct cpfl_rule_cfg_data cfg;
> +	uint16_t cfg_ctrl;
> +
> +	if (!dma->va) {
> +		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
> +		return -1;
> +	}
> +	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
> +	memset(blob, 0, sizeof(*blob));
> +	memset(msg, 0, sizeof(*msg));
> +
> +	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> +		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
> +							  rinfo->sem.sub_prof_id,
> +							  rinfo->sem.pin_to_cache,
> +							  rinfo->sem.fixed_fetch);
> +		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
> +					rinfo->act_bytes, rinfo->act_byte_len,
> +					cfg_ctrl, blob);
> +		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
> +	} else {
> +		PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
> +		return -1;
> +	}
> +
> +	cpfl_fill_rule_cfg_data_common(opc,
> +				       rinfo->cookie,
> +				       rinfo->vsi,
> +				       rinfo->port_num,
> +				       rinfo->host_id,
> +				       0, /* time_sel */
> +				       0, /* time_sel_val */
> +				       0, /* cache_wr_thru */
> +				       rinfo->resp_req,
> +				       sizeof(union cpfl_rule_cfg_pkt_record),
> +				       dma,
> +				       &cfg.common);
> +	cpfl_prep_rule_desc(&cfg, msg);
> +	return 0;
> +}
> +
> +static int
> +cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
> +	       struct idpf_ctlq_msg *msg, bool add)
> +{
> +	int ret = 0;
> +
> +	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> +		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
> +			ret = -1;
> +	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
> +		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
> +			ret = -1;
> +	} else {
> +		PMD_INIT_LOG(ERR, "Invalid type of rule");
> +		ret = -1;
> +	}
> +
> +	return ret;
> +}
> +
> +int
> +cpfl_rule_process(struct cpfl_itf *itf,
> +		  struct idpf_ctlq_info *tx_cq,
> +		  struct idpf_ctlq_info *rx_cq,
> +		  struct cpfl_rule_info *rinfo,
> +		  int rule_num,
> +		  bool add)
> +{
> +	struct idpf_hw *hw = &itf->adapter->base.hw;
> +	int i;
> +	int ret = 0;
> +
> +	if (rule_num == 0)
> +		return 0;
> +
> +	for (i = 0; i < rule_num; i++) {
> +		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "Could not pack rule");
> +			return ret;
> +		}
> +	}
> +	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to send control message");
> +		return ret;
> +	}
> +	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "Failed to update rule");
> +		return ret;
> +	}
> +
> +	return 0;
> +}


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 0/9] add rte flow support for cpfl
  2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
                       ` (9 preceding siblings ...)
  2023-09-06  9:34     ` [PATCH v3 9/9] app/test-pmd: refine encap content Wenjing Qiao
@ 2023-09-15 10:00     ` Zhang, Yuying
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                         ` (9 more replies)
  10 siblings, 10 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset add rte flow support for cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: add mod rule parser support for rte flow

Yuying Zhang (7):
  net/cpfl: set up rte flow skeleton
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
  app/test-pmd: refine encap content
  net/cpfl: fix incorrect status calculation

 app/test-pmd/cmdline_flow.c             |   12 +-
 doc/guides/nics/cpfl.rst                |   43 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  803 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  394 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 +++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1834 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   18 +
 20 files changed, 6489 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 11:14         ` Zhang, Qi Z
  2023-09-15 10:00       ` [PATCH v5 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
                         ` (7 subsequent siblings)
  9 siblings, 1 reply; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" for rte flow json parser which
depends on json-c library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add a JSON parser for rte_flow pattern rules. The cpfl
PMD supports utilizing a JSON config file to translate
rte_flow tokens into low-level hardware resources.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst            |   30 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1302 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |   13 +
 6 files changed, 1625 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..aae157f0df 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,24 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+  Using the ``devargs`` option ``flow_parser`` the user can specify the path
+  of a json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  The PMD will then load the JSON file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The json-c library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +176,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The cpfl PMD uses a JSON file to describe how rte_flow tokens are parsed into
+low-level hardware resources.
+
+- Required Libraries
+
+  * json-c (version 0.14+)
+
+    * For Ubuntu, it can be installed using ``apt install libjson-c-dev``
+
+- Run testpmd with the JSON file
+
+   .. code-block:: console
+
+      dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..54ae127cc3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	adapter = itf->adapter;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..630ce8a227
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1302 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_object_get_string(subobject);
+}
+
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int64(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_object *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(ob_pr_key_attrs, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_object_array_length(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name, *mask;
+
+		object = json_object_array_get_idx(ob_fields, i);
+		name = cpfl_json_object_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_object_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_object *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+	}
+
+	header = cpfl_json_object_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_object *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_object_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_object *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_object_array_get_idx(ob_fvs, i);
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_object *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_object_get(ob_per_act, "data");
+		ret = cpfl_json_object_to_uint16(ob_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(ob_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(ob_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_object *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
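+ *
+ * For illustration only, a hypothetical fragment in the shape this parser
+ * expects (the key names mirror the json_object_object_get() calls below;
+ * the refpkg.json shipped with the device package is the authoritative
+ * reference):
+ *
+ *   { "patterns": [ {
+ *       "key": {
+ *         "protocols": [ { "type": "eth",
+ *                          "fields": [ { "name": "dst_addr",
+ *                                        "mask": "ff:ff:ff:ff:ff:ff" } ] } ],
+ *         "attributes": [ { "Name": "ingress", "Value": 1 } ] },
+ *       "actions": [ { "type": "sem",
+ *                      "data": { "profile": 1, "subprofile": 0, "keysize": 16,
+ *                                "fieldvectors": [ { "offset": 0,
+ *                                                    "type": "immediate",
+ *                                                    "value": 1 } ] } } ] } ] }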
+ */
+static int
+cpfl_flow_js_pattern_rule(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		json_object *ob_pr_actions;
+		json_object *ob_pr_key;
+		json_object *ob_pr_key_protos;
+		json_object *ob_pr_key_attrs;
+		int ret;
+
+		object = json_object_array_get_idx(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "unsupported item type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..af64a158a8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <json-c/json.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..1e0a1b0290 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,16 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    if js_dep.version().version_compare('<0.14')
+        message('json-c lib version is too low')
+    else
+        sources += files(
+                'cpfl_flow_parser.c',
+        )
+        dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+        ext_deps += js_dep
+    endif
+endif
-- 
2.34.1
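
As a reference for reviewers, a minimal usage sketch of the pattern-rule parser
above (not part of the patch; the caller and its name are made up, only
cpfl_flow_parse_items() and struct cpfl_flow_pr_action come from this series).
The FXP flow engine added later in the series is the intended consumer of this
helper.

#include <errno.h>

#include "cpfl_flow_parser.h"

/* Illustrative only: translate an rte_flow pattern into the SEM
 * profile/key description selected by the JSON pattern rules.
 */
static int
example_map_pattern(struct cpfl_itf *itf, struct cpfl_flow_js_parser *parser,
		    const struct rte_flow_item items[],
		    const struct rte_flow_attr *attr)
{
	struct cpfl_flow_pr_action pr_action = {0};

	/* Returns 0 when a pattern rule (protocols + attributes) matches and
	 * fills pr_action with the SEM profile, subprofile and field vector.
	 */
	if (cpfl_flow_parse_items(itf, parser, items, attr, &pr_action) < 0)
		return -EINVAL;

	if (pr_action.type != CPFL_JS_PR_ACTION_TYPE_SEM)
		return -ENOTSUP;

	return pr_action.sem.prof;
}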


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 2/9] net/cpfl: add mod rule parser support for rte flow
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
                         ` (6 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add json parser support for rte flow modification rules.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 534 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 633 insertions(+), 1 deletion(-)
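
A minimal sketch of how the new modification-rule parser is meant to be
consumed (the wrapper below is illustrative only; cpfl_flow_parse_actions()
and struct cpfl_flow_mr_action are the interfaces added by this patch):

#include <errno.h>

#include "cpfl_flow_parser.h"

/* Illustrative only: match rte_flow actions against the JSON "modifications"
 * rules and retrieve the MOD profile plus the composed MOD memory contents.
 */
static int
example_map_mod_actions(struct cpfl_flow_js_parser *parser,
			const struct rte_flow_action actions[])
{
	struct cpfl_flow_mr_action mr_action = {0};

	/* On success mr_action.mod holds the profile ID, the number of valid
	 * bytes and up to 256 bytes of data composed from the layout hints.
	 */
	if (cpfl_flow_parse_actions(parser, actions, &mr_action) < 0)
		return -EINVAL;

	return mr_action.mod.prof;
}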

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index 630ce8a227..c33ee1ec27 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "unsupported action type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_object_to_string(json_object *object, const char *name)
 {
@@ -50,6 +62,25 @@ cpfl_json_object_to_string(json_object *object, const char *name)
 	return json_object_get_string(subobject);
 }
 
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
 {
@@ -517,6 +548,228 @@ cpfl_flow_js_pattern_rule(json_object *ob_root, struct cpfl_flow_js_parser *pars
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_object *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(ob_protos, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "unsupported action type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_object *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(ob_layouts, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_object *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_object *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "unsupported action type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_object *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The 'modifications' section is optional.");
+		return 0;
+	}
+	len = json_object_array_length(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_object_array_get_idx(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -527,6 +780,11 @@ cpfl_parser_init(json_object *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -601,6 +859,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -617,6 +884,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -645,7 +923,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1234,6 +1512,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported action type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "unsupported action type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "unsupported action type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		PMD_DRV_LOG(INFO, "The 'modifications' section is optional.");
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index af64a158a8..2618a9a81f 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the 'protocols'
+ * field of its data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where the data to copy from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that
+ * helps the driver compose the MOD memory region when the action needs to insert/update
+ * some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that helps the driver compose the MOD memory region when
+ * the action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow
+ * modification actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1
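
To illustrate the layout/hint mechanism handled by cpfl_parse_layout() above,
one entry of the parsed layout array could look like the sketch below. All
values are illustrative; with an IPv4 spec in the vxlan_encap definition,
offset 16 and size 4 would select the outer destination address.

#include "cpfl_flow_parser.h"

/* One parsed layout entry (illustrative values): copy 4 bytes starting at
 * byte 16 of the ipv4 spec found in the vxlan_encap definition referenced
 * by key action 0 into the next free bytes of the MOD data buffer.
 */
static const struct cpfl_flow_js_mr_layout example_layout = {
	.index = 0,	/* which mr->key->actions[] entry supplies the data */
	.hint = "ipv4",	/* which item of the encap definition to copy from */
	.offset = 16,	/* start byte inside that item's spec */
	.size = 4,	/* number of bytes appended to mod.data */
};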


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 3/9] net/cpfl: set up rte flow skeleton
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (2 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
                         ` (5 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
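
For reviewers, a sketch of the contract a flow engine has with this skeleton.
The example_* callbacks are stand-ins that never claim a flow; the real FXP
engine is added later in the series. Only struct cpfl_flow_engine,
cpfl_flow_engine_register() and the enum values come from this patch.

#include <errno.h>

#include <rte_common.h>

#include "cpfl_flow.h"

/* Illustrative stub callbacks only. */
static int
example_engine_init(struct cpfl_adapter_ext *ad)
{
	RTE_SET_USED(ad);
	return 0;
}

static int
example_parse(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
	      const struct rte_flow_item pattern[],
	      const struct rte_flow_action actions[], void **meta)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(attr);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
	RTE_SET_USED(meta);
	return -ENOTSUP;	/* never accepts a pattern in this sketch */
}

static struct cpfl_flow_engine example_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,	/* a real engine would use CPFL_FLOW_ENGINE_FXP */
	.init = example_engine_init,
	.parse_pattern_action = example_parse,
};

/* Constructor-time registration; cpfl_flow_engine_init() then calls .init
 * for every registered engine, and cpfl_flow_engine_match() walks the list
 * until one .parse_pattern_action accepts the pattern/action pair.
 */
RTE_INIT(example_engine_register)
{
	cpfl_flow_engine_register(&example_engine);
}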

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 54ae127cc3..44418ce325 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1e0a1b0290..9f1818f8dc 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if js_dep.found()
         message('json-c lib version is too low')
     else
         sources += files(
+		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
-- 
2.34.1
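
From the application side nothing cpfl-specific is needed; a hedged example is
shown below (the helper is illustrative, and whether this pattern/action pair
is accepted ultimately depends on the JSON rules loaded through the
flow_parser devarg and on the registered engines).

#include <rte_flow.h>

/* Illustrative application-side call: everything goes through the generic
 * rte_flow API, which ethdev routes to cpfl_flow_ops via flow_ops_get.
 */
static int
example_validate_drop(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}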


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 4/9] net/cpfl: add FXP low level implementation
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (3 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 11:19         ` Zhang, Qi Z
  2023-09-15 10:00       ` [PATCH v5 5/9] net/cpfl: add fxp rule module Zhang, Yuying
                         ` (4 subsequent siblings)
  9 siblings, 1 reply; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add FXP low level implementation for CPFL rte_flow to
create/delete rules as well as setup the control vport
and control queue.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 ++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   2 +
 9 files changed, 2150 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h
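
A small sketch of how the 16-bit action encoding macros below are used (the
helper itself is illustrative; only the macros, cpfl_act_nop() and
union cpfl_action_set come from this patch):

#include "cpfl_actions.h"

/* Illustrative only: build a generic 16-bit action word from a precedence,
 * an action index and a value, falling back to a NOP when the precedence is
 * out of range.
 */
static inline union cpfl_action_set
example_make_16b_action(uint8_t prec, uint8_t idx, uint16_t val)
{
	union cpfl_action_set act;

	if (!CPFL_ACT_PREC_CHECK(prec))
		return cpfl_act_nop();

	act.data = CPFL_ACT_MAKE_16B(prec, idx, val);
	return act;
}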

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions */
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions */
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless two 8-bit actions are combined into one action set, both the A and B
+ * fields must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: The HAS is being updated.  Revise the order of the chained and base
+ * actions once the HAS finalizes it.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update existing
+ * metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two action
+ * sets: the chained (auxiliary) action set comes first and the base/parent
+ * action set comes second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
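
A quick illustration of how these inline factories compose (not part of the
patch itself; the precedence, slot, VSI and queue values below are made-up
placeholders, and any out-of-range parameter simply comes back as a NOP
encoding):

#include <stdbool.h>
#include <stdint.h>
#include "cpfl_actions.h"

/* Illustrative sketch only: build either a drop or a forward action list. */
static int
example_build_act_list(union cpfl_action_set *acts, int max, bool drop,
		       uint16_t vsi, uint16_t queue)
{
	int n = 0;

	if (max < 2)
		return -1;

	if (drop) {
		/* precedence 1 is a placeholder value */
		acts[n++] = cpfl_act_drop(1);
		acts[n++] = cpfl_act_set_commit_mode(1, CPFL_ACT_COMMIT_ALL);
	} else {
		/* SET_VSI slot 0, LAN protocol engine */
		acts[n++] = cpfl_act_fwd_vsi(0, 1, CPFL_PE_LAN, vsi);
		acts[n++] = cpfl_act_set_hash_queue(1, CPFL_PE_LAN, queue, false);
	}

	/* invalid parameters are signalled by a NOP encoding */
	if (cpfl_is_nop_action(&acts[0]))
		return -1;

	return n;
}
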
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that DMA parameter of each DMA memory struct is present and
+ * consistent with control queue parameters
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or is
+ * inconsistent with the control queue parameters, this routine will free the
+ * memory for both the descriptors and the buffers.
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * region whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shut down the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input;
+		 * -1 for the default mailbox, if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFLib will split
+	 * into individual buffers, one for each descriptor.
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
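
As a rough usage sketch (not part of the patch; the queue id is a placeholder
and the register fields are left for the caller to fill from the control
vport's chunk info, as cpfl_cfgq_setup() in cpfl_ethdev.c below does), adding
one Rx config queue from CP-provided DMA memory looks like this:

#include "cpfl_controlq.h"

/* Illustrative sketch only: register one Rx config queue whose descriptor
 * ring and buffer DMA memory were already allocated (e.g. by the CP).
 */
static int
example_add_rx_cfgq(struct idpf_hw *hw, struct idpf_dma_mem *ring_mem,
		    struct idpf_dma_mem *buf_mem, struct idpf_ctlq_info **cq)
{
	struct cpfl_ctlq_create_info qinfo = {
		.type = IDPF_CTLQ_TYPE_CONFIG_RX,
		.id = 0,			/* placeholder queue id */
		.len = CPFL_CFGQ_RING_LEN,
		.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE,
		.ring_mem = *ring_mem,
		.buf_mem = *buf_mem,
	};

	return cpfl_vport_ctlq_add(hw, &qinfo, cq);
}

The matching teardown is cpfl_vport_ctlq_remove(), which also releases the
per-descriptor buffer bookkeeping set up in cpfl_ctlq_alloc_ring_res().
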
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 44418ce325..88e2ecf754 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef CPFL_FLOW_JSON_SUPPORT
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef CPFL_FLOW_JSON_SUPPORT
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef CPFL_FLOW_JSON_SUPPORT
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
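
One layout detail of cpfl_cfgq_setup() above that is easy to miss: the
CPFL_CFGQ_NUM config queues are interleaved, even indices being Tx and odd
indices Rx, with each Tx/Rx pair sharing the relative queue id i / 2 that is
added to the chunk start ids. A trivial sketch of that mapping (illustrative
helpers only, not part of the patch):

#include <stdbool.h>

/* Mirror the index layout used by cpfl_cfgq_setup(). */
static inline bool example_cfgq_is_tx(int i)   { return (i % 2) == 0; }
static inline int  example_cfgq_rel_qid(int i) { return i / 2; }
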
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the context bits for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
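
Putting the two helpers together, a SEM "add rule" message could be prepared
roughly as follows before being posted on a config queue. This is a hedged
sketch only: the cpfl_rule_cfg_data field names follow the usage in
cpfl_prep_rule_desc() above, and the profile id, cfg_ctrl flags and completion
request are assumed placeholder values.

#include <stdint.h>
#include "cpfl_rules.h"

/* Illustrative sketch only: fill a control queue message that adds a SEM
 * rule.  'payload' is DMA memory large enough to hold the rule blob;
 * key/acts are pre-encoded byte streams.
 */
static void
example_prep_sem_add_rule(struct idpf_ctlq_msg *msg,
			  struct idpf_dma_mem *payload,
			  const uint8_t *key, uint8_t key_len,
			  const uint8_t *acts, uint8_t act_len,
			  uint16_t prof_id)
{
	union cpfl_rule_cfg_pkt_record *blob = payload->va;
	struct cpfl_rule_cfg_data cfg = { 0 };
	uint16_t cfg_ctrl;

	/* no cache pinning, no fixed fetch (assumed defaults) */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, 0, 0, 0);
	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, blob);

	cfg.common.opc = cpfl_ctlq_sem_add_rule;
	cfg.common.buf_len = sizeof(*blob);
	cfg.common.payload = payload;
	cfg.common.resp_req = 1;	/* request a completion (assumed) */
	cpfl_prep_rule_desc(&cfg, msg);
}
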
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max hash collisions */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+ /* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 9f1818f8dc..53eb5aecad 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,8 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
+        'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1
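
For reference, a minimal sketch of how the helpers added in cpfl_rules.h above
are meant to be combined when building a single SEM add-rule message. This is
an illustrative sketch only: prof_id, sub_prof_id, vsi_id, cookie, the key and
action buffers, and the pre-allocated "dma" buffer are placeholder inputs, not
values taken from this patch.

	/* Sketch: build one SEM add-rule ctlq message with the new helpers.
	 * All concrete values below are placeholders.
	 */
	union cpfl_rule_cfg_pkt_record *blob = dma->va;	/* pre-allocated DMA mem */
	struct cpfl_rule_cfg_data cfg = {0};
	struct idpf_ctlq_msg msg = {0};
	uint16_t cfg_ctrl;

	/* pack profile/sub-profile IDs into the 16-bit CFG_CTRL word */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,
						  0 /* pin_to_cache */,
						  0 /* fixed_fetch */);

	/* copy key and action bytes into the 256-byte rule blob */
	cpfl_prep_sem_rule_blob(key, key_byte_len, act_bytes, act_byte_len,
				cfg_ctrl, blob);

	/* fill the common config data, then convert it to a descriptor */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule, cookie, vsi_id,
				       0 /* port_num */, 0 /* host_id */,
				       0 /* time_sel */, 0 /* time_sel_val */,
				       0 /* cache_wr_thru */, 1 /* resp_req */,
				       (uint16_t)sizeof(*blob), dma, &cfg.common);
	cpfl_prep_rule_desc(&cfg, &msg);
	/* msg is now ready to be posted on a config tx queue */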


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 5/9] net/cpfl: add fxp rule module
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (4 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
                         ` (3 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add a low-level fxp module for rule packing, creation and destruction.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 424 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  24 ++
 drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++
 drivers/net/cpfl/meson.build     |   1 +
 7 files changed, 850 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
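
As a usage note, the flow engine added in the next patch drives this module
through cpfl_rule_process(), which packs each rule, sends it on a config tx
queue and polls the paired rx queue for the result. A minimal sketch of a
single SEM rule going through that path is shown below; the queue pair and
every rule field here are illustrative placeholders, and handle_error() is a
hypothetical caller-side helper.

	/* Sketch: hand one SEM rule to the new module. All values are
	 * placeholders; tx_cq/rx_cq are the vport's config queue pair.
	 */
	struct cpfl_rule_info rinfo = {0};
	int ret;

	rinfo.type = CPFL_RULE_TYPE_SEM;
	rinfo.sem.prof_id = prof_id;		/* from the parsed pattern */
	rinfo.sem.key_byte_len = key_len;
	memcpy(rinfo.sem.key, key, key_len);
	rinfo.act_byte_len = act_len;
	memcpy(rinfo.act_bytes, acts, act_len);
	rinfo.cookie = cookie;
	rinfo.vsi = vsi_id;

	/* 'true' adds the rule; 'false' would delete it */
	ret = cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
	if (ret < 0)
		handle_error(ret);	/* HW rejected the rule or ctlq timed out */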

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
index 476c78f235..ed76282b0c 100644
--- a/drivers/net/cpfl/cpfl_controlq.c
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 	return status;
 }
 
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		uint64_t msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		msg_cookie = *(uint64_t *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to the end of the ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
 int
 cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
 		    struct idpf_ctlq_info **cq)
@@ -377,3 +773,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
 	cpfl_ctlq_remove(hw, cq);
 }
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
index 930d717f63..740ae6522c 100644
--- a/drivers/net/cpfl/cpfl_controlq.h
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -14,6 +14,13 @@
 #define CPFL_DFLT_MBX_RING_LEN		512
 #define CPFL_CFGQ_RING_LEN		512
 
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
 /* Generic queue info structures */
 /* MB, CONFIG and EVENT q do not have extended info */
 struct cpfl_ctlq_create_info {
@@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
 int cpfl_ctlq_add(struct idpf_hw *hw,
 		  struct cpfl_ctlq_create_info *qinfo,
 		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
 int cpfl_vport_ctlq_add(struct idpf_hw *hw,
 			struct cpfl_ctlq_create_info *qinfo,
 			struct idpf_ctlq_info **cq);
 void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
 #endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88e2ecf754..cb407e66af 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 53eb5aecad..a06265e6d5 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -49,6 +49,7 @@ if js_dep.found()
         sources += files(
 		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
+		'cpfl_fxp_rule.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
         ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 6/9] net/cpfl: add fxp flow engine
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (5 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 5/9] net/cpfl: add fxp rule module Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 7/9] net/cpfl: add flow support for representor Zhang, Yuying
                         ` (2 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt the low-level fxp module as a flow engine.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 611 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
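
Once this engine is registered, a plain rte_flow request on a cpfl vport is
parsed against the JSON profile and translated into SEM (and optionally MOD)
rules. The snippet below is an illustrative sketch of the kind of request the
engine is expected to serve; the pattern specs, queue index and port_id are
placeholders, and which patterns actually match depends on the loaded JSON
parser file.

	/* Sketch: generic rte_flow request backed by this engine */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* the cpfl flow layer dispatches this to
	 * cpfl_fxp_parse_pattern_action()/cpfl_fxp_create() for vports
	 * handled by this engine
	 */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (!flow)
		printf("flow create failed: %s\n",
		       err.message ? err.message : "(no message)");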

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e0c08a77c3
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * Even index is tx queue and odd index is rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Pattern is not supported.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse actions.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index a06265e6d5..7c6a000933 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -50,6 +50,7 @@ if js_dep.found()
 		'cpfl_flow.c',
                 'cpfl_flow_parser.c',
 		'cpfl_fxp_rule.c',
+		'cpfl_flow_engine_fxp.c',
         )
         dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
         ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 7/9] net/cpfl: add flow support for representor
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (6 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 8/9] app/test-pmd: refine encap content Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for representors, so that a representor can
create, destroy, validate and flush rules.
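
For example (a usage sketch only, with made-up port ids and header values,
assuming testpmd port 1 is a representor created via the representor
devargs), a rule can now be created directly on the representor port:

  flow create 1 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / \
  ipv4 src is 192.168.0.1 dst is 192.168.0.2 / tcp / end \
  actions represented_port ethdev_port_id 0 / end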

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 13 ++++
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 90 ++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++
 4 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index aae157f0df..bcfa2a8a5b 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -194,3 +194,16 @@ low level hardware resources.
    .. code-block:: console
 
    dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from I/O port to a local(CPF's) vport::
+
+   .. code-block:: console
+
+   flow create 0 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id 0 / end
+
+#. Send the packet, and it should be displayed on PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="enp24s0f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 3d9be208d0..bad71ad3fd 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -81,6 +81,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index e0c08a77c3..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
@@ -414,6 +434,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,7 +508,13 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
-	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
 		return -EINVAL;
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..2ab04f1e60 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 8/9] app/test-pmd: refine encap content
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (7 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 7/9] net/cpfl: add flow support for representor Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  2023-09-15 10:00       ` [PATCH v5 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu
  Cc: mingxia.liu, stable

From: Yuying Zhang <yuying.zhang@intel.com>

Refine the VXLAN encap content so that all protocol headers of the
encapsulation data (Ethernet type, IPv4/IPv6 next protocol and TTL,
UDP checksum and VXLAN flags) are filled with sensible defaults.
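
As a usage sketch (addresses, VNI and ports below are placeholders), these
are the headers testpmd builds when the VXLAN encap action is configured
and attached to a flow:

  set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789 \
  ip-src 192.168.1.1 ip-dst 192.168.1.2 \
  eth-src 00:11:22:33:44:55 eth-dst 00:aa:bb:cc:dd:ee
  flow create 0 ingress pattern eth / ipv4 / end \
  actions vxlan_encap / queue index 0 / end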

Fixes: 1960be7d32f8 ("app/testpmd: add VXLAN encap/decap")
Cc: stable@dpdk.org

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 app/test-pmd/cmdline_flow.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 94827bcc4a..b6cc0d9620 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -8514,7 +8514,7 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 				.type = RTE_FLOW_ITEM_TYPE_END,
 			},
 		},
-		.item_eth.hdr.ether_type = 0,
+		.item_eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
 		.item_vlan = {
 			.hdr.vlan_tci = vxlan_encap_conf.vlan_tci,
 			.hdr.eth_proto = 0,
@@ -8522,24 +8522,32 @@ parse_setup_vxlan_encap_data(struct action_vxlan_encap_data *action_vxlan_encap_
 		.item_ipv4.hdr = {
 			.src_addr = vxlan_encap_conf.ipv4_src,
 			.dst_addr = vxlan_encap_conf.ipv4_dst,
+			.version_ihl = RTE_IPV4_VHL_DEF,
+			.next_proto_id = IPPROTO_UDP,
+			.time_to_live = IPDEFTTL,
+			.hdr_checksum = rte_cpu_to_be_16(1),
 		},
 		.item_udp.hdr = {
 			.src_port = vxlan_encap_conf.udp_src,
 			.dst_port = vxlan_encap_conf.udp_dst,
+			.dgram_cksum = RTE_BE16(0x01),
 		},
-		.item_vxlan.hdr.flags = 0,
+		.item_vxlan.hdr.flags = 0x08,
 	};
 	memcpy(action_vxlan_encap_data->item_eth.hdr.dst_addr.addr_bytes,
 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(action_vxlan_encap_data->item_eth.hdr.src_addr.addr_bytes,
 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
 	if (!vxlan_encap_conf.select_ipv4) {
+		action_vxlan_encap_data->item_eth.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
 		       &vxlan_encap_conf.ipv6_src,
 		       sizeof(vxlan_encap_conf.ipv6_src));
 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
 		       &vxlan_encap_conf.ipv6_dst,
 		       sizeof(vxlan_encap_conf.ipv6_dst));
+		action_vxlan_encap_data->item_ipv6.hdr.proto = IPPROTO_UDP;
+		action_vxlan_encap_data->item_ipv6.hdr.hop_limits = IPDEFTTL;
 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
 			.spec = &action_vxlan_encap_data->item_ipv6,
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v5 9/9] net/cpfl: fix incorrect status calculation
  2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
                         ` (8 preceding siblings ...)
  2023-09-15 10:00       ` [PATCH v5 8/9] app/test-pmd: refine encap content Zhang, Yuying
@ 2023-09-15 10:00       ` Zhang, Yuying
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-09-15 10:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, beilei.xing, jingjing.wu; +Cc: mingxia.liu

From: Yuying Zhang <yuying.zhang@intel.com>

Fix the incorrect ingress packet count calculation: received discards are
already reported in imissed, so they must not be subtracted from ipackets.

Fixes: e3289d8fb63f ("net/cpfl: support basic statistics")

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index cb407e66af..5b5abc7684 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -322,7 +322,7 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		idpf_vport_stats_update(&vport->eth_stats_offset, pstats);
 		stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
-				pstats->rx_broadcast - pstats->rx_discards;
+				  pstats->rx_broadcast;
 		stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
 						pstats->tx_unicast;
 		stats->imissed = pstats->rx_discards;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-09-15 10:00       ` [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
@ 2023-09-15 11:14         ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-15 11:14 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Xing, Beilei, Wu, Jingjing
  Cc: Liu, Mingxia, Qiao, Wenjing, Richardson, Bruce



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Friday, September 15, 2023 6:01 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules
> 
> From: Wenjing Qiao <wenjing.qiao@intel.com>
> 
> Add devargs "flow_parser" for rte flow json parser which depends on json-c
> library.

Please ignore the json-c dependency here; we've been kindly reminded that DPDK has jansson already.


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v5 4/9] net/cpfl: add FXP low level implementation
  2023-09-15 10:00       ` [PATCH v5 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-09-15 11:19         ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-15 11:19 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Xing, Beilei, Wu, Jingjing; +Cc: Liu, Mingxia



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Friday, September 15, 2023 6:01 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>
> Cc: Liu, Mingxia <mingxia.liu@intel.com>
> Subject: [PATCH v5 4/9] net/cpfl: add FXP low level implementation
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> Add FXP low level implementation for CPFL rte_flow to
> create/delete rules as well as setup the control vport
> and control queue.

It's better to separate this big patch into two parts:

one for control queue enabling
one for introducing a set of low-level helper functions which will be consumed by the following fxp rule module


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-08-15 16:50       ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
@ 2023-09-15 15:11         ` Stephen Hemminger
  0 siblings, 0 replies; 128+ messages in thread
From: Stephen Hemminger @ 2023-09-15 15:11 UTC (permalink / raw)
  To: Zhang, Yuying
  Cc: dev, qi.z.zhang, beilei.xing, jingjing.wu, mingxia.liu, Wenjing Qiao

On Tue, 15 Aug 2023 16:50:42 +0000
"Zhang, Yuying" <yuying.zhang@intel.com> wrote:

> +Rte_flow
> +~~~~~~~~~~~~~
> +
> +PMD uses a json file to direct CPF PMD to parse rte_flow tokens into
> +low level hardware resources.
> +
> +- Required Libraries
> +
> +  * json-c (version 0.14+)
> +
> +    * For Ubuntu, it can be installed using `apt install libjson-c-dev`

Other parts of DPDK are using the jansson library, which provides equivalent functionality.
Introducing another dependency to DPDK which duplicates already-used
code just makes life more complicated and difficult for existing users.

Therefore my recommendation is that the patch not be accepted as is.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 0/8] add rte flow support for cpfl
  2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
                           ` (7 preceding siblings ...)
  2023-08-22  1:02         ` [PATCH v6 8/8] net/cpfl: add flow support for representor Zhang, Yuying
@ 2023-09-26 18:16         ` yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
                             ` (8 more replies)
  8 siblings, 9 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:16 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying <yuying.zhang@intel.com>

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: build action mapping rules from JSON

Yuying Zhang (6):
  net/cpfl: set up rte flow skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
---
v7:
* refine commit log
* fix compile issues

v6:
* use the existing jansson library instead of json-c.
* refine "add FXP low level implementation"

V5:
* Add input validation for some functions.


 doc/guides/nics/cpfl.rst                |   53 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1839 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  126 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6485 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
@ 2023-09-26 18:16           ` yuying.zhang
  2023-09-26 19:03             ` Stephen Hemminger
  2023-09-26 18:16           ` [PATCH v7 2/8] net/cpfl: build action mapping rules from JSON yuying.zhang
                             ` (7 subsequent siblings)
  8 siblings, 1 reply; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:16 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add the devargs "flow_parser" for the rte_flow JSON parser, which depends
on the jansson library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add a JSON parser for rte_flow pattern rules which can build rules
that map a set of rte_flow items to hardware representations.

The cpfl PMD supports utilizing a JSON configuration file to translate
rte flow tokens into low level hardware resources. The JSON configuration
file is provided by the hardware vendor and is intended to work exclusively
with a specific P4 pipeline configuration, which must be compiled and
programmed into the hardware.

The format of the JSON file strictly follows the internal specifications
of the hardware vendor and is not meant to be modified directly by
users.
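
As an illustration only, here is a hypothetical fragment using the object
and key names the parser looks up (profile numbers, masks and offsets are
invented; real files are generated by the vendor for a specific P4
pipeline and are not meant to be hand-edited):

  {
      "patterns": [
          {
              "key": {
                  "protocols": [
                      { "type": "eth",  "fields": [ { "name": "dst_addr", "mask": "ff:ff:ff:ff:ff:ff" } ] },
                      { "type": "ipv4", "fields": [ { "name": "src_addr", "mask": "255.255.255.255" } ] }
                  ],
                  "attributes": [ { "Name": "ingress", "Value": 1 } ]
              },
              "actions": [
                  {
                      "type": "sem",
                      "data": {
                          "profile": 1,
                          "subprofile": 0,
                          "keysize": 16,
                          "fieldvectors": [
                              { "offset": 0, "type": "immediate", "value": 1 },
                              { "offset": 1, "type": "protocol",
                                "value": { "layer": 0, "header": "ipv4", "offset": 12, "mask": 65535 } }
                          ]
                      }
                  }
              ]
          }
      ]
  }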

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst            |   38 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1303 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1628 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..e17347d15c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,32 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+
+  The JSON configuration file is provided by the hardware vendor and is intended to work
+  exclusively with a specific P4 pipeline configuration, which must be compiled and programmed
+  into the hardware.
+
+  The format of the JSON file strictly follows the internal specifications of the hardware
+  vendor and is not meant to be modified directly by users.
+
+  Using the ``devargs`` option ``flow_parser`` the user can specify the path
+  of a json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load json file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +184,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+PMD uses a json file to direct CPF PMD to parse rte_flow tokens into
+low level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using `apt install libjansson-dev`
+
+- run testpmd with the json file
+
+   .. code-block:: console
+
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..308f9c9736
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1303 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint16_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint32_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..367a6da574
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit immediate value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* defines how to map the current key to the low-level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array defining how to map the current key to the low-level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 2/8] net/cpfl: build action mapping rules from JSON
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
@ 2023-09-26 18:16           ` yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 3/8] net/cpfl: set up rte flow skeleton yuying.zhang
                             ` (6 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:16 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Build rules that map an rte_flow action vxlan_encap or
vxlan_decap to its hardware representation.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
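
Below is a minimal, illustrative sketch of the rule this parser builds from
one entry of the JSON "modifications" array, expressed with the structures
added in cpfl_flow_parser.h.  The JSON keys shown in the comment are the ones
the parser reads (type/data/protocols, profile/layout/index/size/offset/hint);
the profile ID and the layout offsets/sizes are made-up example values, not
defaults shipped with any firmware package.

#include "cpfl_flow_parser.h"

/*
 * Roughly equivalent JSON (values illustrative only):
 *
 * { "key": { "actions": [ { "type": "vxlan_encap",
 *                           "data": { "protocols": ["eth", "ipv4", "udp", "vxlan"] } } ] },
 *   "action": { "type": "mod",
 *               "data": { "profile": 1,
 *                         "layout": [ { "index": 0, "hint": "eth",  "offset": 0, "size": 14 },
 *                                     { "index": 0, "hint": "ipv4", "offset": 0, "size": 20 } ] } } }
 */
static struct cpfl_flow_js_mr_key_action example_key_act = {
	.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
	.encap = {
		.protocols = { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV4,
			       RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_VXLAN },
		.proto_size = 4,
	},
};

static struct cpfl_flow_js_mr_layout example_layout[] = {
	/* copy 14 bytes of the user's ETH header spec into the MOD region */
	{ .index = 0, .hint = "eth",  .offset = 0, .size = 14 },
	/* then 20 bytes of the user's IPv4 header spec */
	{ .index = 0, .hint = "ipv4", .offset = 0, .size = 20 },
};

static struct cpfl_flow_js_mr example_mr = {
	.key = { .actions = &example_key_act, .actions_size = 1 },
	.action = {
		.type = CPFL_JS_MR_ACTION_TYPE_MOD,
		.mod = { .prof = 1, .layout = example_layout, .layout_size = 2 },
	},
};

At flow-creation time, cpfl_flow_parse_actions() matches the user's
vxlan_encap action against such a rule and fills the MOD data buffer
according to the layout array.
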
 drivers/net/cpfl/cpfl_flow_parser.c | 538 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 637 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index 308f9c9736..81ccbbd8e5 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -50,6 +62,29 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (int)json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -522,6 +557,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		if (strlen(hint) >= CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'hint' string is too long.");
+			goto err;
+		}
+		/* copy the terminating NUL too: the layout memory is not zeroed */
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint) + 1);
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unsupported action type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -532,6 +789,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -602,6 +864,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -618,6 +889,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -646,7 +928,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1235,6 +1517,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummpy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					/* the MOD data buffer is 256 bytes; avoid overflow */
+					if (start + size > 256) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "data does not fit the MOD buffer: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 367a6da574..b7bf21bd76 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the 'protocols'
+ * field of 'data'.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where to copy the data from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that help
+ * the driver compose the MOD memory region when the action needs to insert/update some
+ * packet data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that help the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 3/8] net/cpfl: set up rte flow skeleton
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 2/8] net/cpfl: build action mapping rules from JSON yuying.zhang
@ 2023-09-26 18:16           ` yuying.zhang
  2023-09-26 18:16           ` [PATCH v7 4/8] net/cpfl: set up control path yuying.zhang
                             ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:16 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
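
Below is a minimal sketch of how a backend engine is expected to plug into
this skeleton.  The sketch_* callbacks are placeholders for illustration
only; the real FXP engine that registers itself this way is added later in
this series.

#include <errno.h>
#include <rte_common.h>
#include "cpfl_flow.h"

/* illustrative stubs only */
static int
sketch_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static int
sketch_parse_pattern_action(struct rte_eth_dev *dev __rte_unused,
			    const struct rte_flow_attr *attr __rte_unused,
			    const struct rte_flow_item pattern[] __rte_unused,
			    const struct rte_flow_action actions[] __rte_unused,
			    void **meta __rte_unused)
{
	/* returning <0 means "not my flow", so cpfl_flow_engine_match() moves on */
	return -ENOTSUP;
}

static struct cpfl_flow_engine sketch_engine = {
	.type = CPFL_FLOW_ENGINE_FXP,
	.init = sketch_engine_init,
	.parse_pattern_action = sketch_parse_pattern_action,
};

/* constructor-time registration, the same pattern a real engine uses */
RTE_INIT(cpfl_sketch_engine_register)
{
	cpfl_flow_engine_register(&sketch_engine);
}

With an engine registered, cpfl_flow_init() only creates the parser and
initializes the engines when the flow_parser devargs points to a JSON
configuration file; otherwise the flow module stays disabled.
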
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vports support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 4/8] net/cpfl: set up control path
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (2 preceding siblings ...)
  2023-09-26 18:16           ` [PATCH v7 3/8] net/cpfl: set up rte flow skeleton yuying.zhang
@ 2023-09-26 18:16           ` yuying.zhang
  2023-09-26 18:17           ` [PATCH v7 5/8] net/cpfl: add FXP low level implementation yuying.zhang
                             ` (4 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:16 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up control vport and control queue for flow offloading.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
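
Below is a short sketch of the DMA sizing rules that
cpfl_check_dma_mem_parameters() enforces for a config RX queue.  The
idpf_alloc_dma_mem()/idpf_free_dma_mem() helpers from the idpf base code are
used here for illustration only; the control-path setup elsewhere in this
patch is expected to satisfy the same constraints.

#include <errno.h>
#include "cpfl_controlq.h"
#include "base/idpf_controlq.h"

/* sketch: size a CONFIG_RX queue so it passes cpfl_check_dma_mem_parameters() */
static int
sketch_fill_cfgq_info(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo)
{
	qinfo->type = IDPF_CTLQ_TYPE_CONFIG_RX;
	qinfo->len = CPFL_CFGQ_RING_LEN;
	qinfo->buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;

	/* descriptor ring: exactly len * sizeof(struct idpf_ctlq_desc) bytes */
	if (!idpf_alloc_dma_mem(hw, &qinfo->ring_mem,
				qinfo->len * sizeof(struct idpf_ctlq_desc)))
		return -ENOMEM;

	/* RX config queue buffers: one region of ring_len * buf_size bytes */
	if (!idpf_alloc_dma_mem(hw, &qinfo->buf_mem,
				CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE)) {
		idpf_free_dma_mem(hw, &qinfo->ring_mem);
		return -ENOMEM;
	}

	return 0;
}

cpfl_ctlq_alloc_ring_res() then records these regions in the control queue
and, for RX types, splits the buffer region into per-descriptor chunks.
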
 drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1305 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..4a925bc338
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EINVAL;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EINVAL;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  CP allocates one big chunk of DMA
+		 * region whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block is broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EINVAL;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		desc->cookie_high =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
+		desc->cookie_low =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. pass buff_count = 0
+ * and buffs = NULL, to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
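
A minimal usage sketch of the API above (illustrative only, not part of the
patch): it assumes hw, a Tx config queue tx_cq and an Rx config queue rx_cq
already created with cpfl_vport_ctlq_add(), and dma_buf, a caller-allocated
struct idpf_dma_mem holding the request payload:

	struct idpf_ctlq_msg req = { 0 }, resp = { 0 };
	struct idpf_ctlq_msg *done[1];
	struct idpf_dma_mem *rx_bufs[1];
	uint16_t n = 1;

	/* opcode and cookie fields are filled per the specific message */
	req.data_len = (uint16_t)dma_buf.size;
	req.ctx.indirect.payload = &dma_buf;
	cpfl_vport_ctlq_send(hw, tx_cq, 1, &req);
	cpfl_vport_ctlq_clean_sq(tx_cq, &n, done);	/* reclaim Tx slot once DD is set */
	n = 1;
	cpfl_vport_ctlq_recv(rx_cq, &n, &resp);		/* poll for the response */
	rx_bufs[0] = resp.ctx.indirect.payload;		/* buffer detached by recv */
	cpfl_vport_ctlq_post_rx_buffs(hw, rx_cq, &n, rx_bufs);	/* give it back to hw */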
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
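
A note on the layout used by cpfl_cfgq_setup() above (illustrative): even
slots of cfgq_info[]/ctlqp[] hold Tx config queues and odd slots hold the
matching Rx config queues, which is why cpfl_config_ctlq_tx()/_rx() index
them with 2 * i and 2 * i + 1:

	/* index:  0    1    2    3    4    5    6    7  */
	/* queue: Tx0  Rx0  Tx1  Rx1  Tx2  Rx2  Tx3  Rx3 */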
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
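
For clarity (illustrative arithmetic, not part of the patch): the virtchnl2
config structures already embed one qinfo entry, hence the num_qs - 1 term
when sizing the messages above; with CPFL_RX_CFGQ_NUM = 4 this is

	size = sizeof(struct virtchnl2_config_rx_queues) +
	       3 * sizeof(struct virtchnl2_rxq_info);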
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 5/8] net/cpfl: add FXP low level implementation
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (3 preceding siblings ...)
  2023-09-26 18:16           ` [PATCH v7 4/8] net/cpfl: set up control path yuying.zhang
@ 2023-09-26 18:17           ` yuying.zhang
  2023-09-26 18:17           ` [PATCH v7 6/8] net/cpfl: add fxp rule module yuying.zhang
                             ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:17 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add the low-level implementation for the CPFL PMD to create / delete
rules on the IPU's Flexible Packet Processor (FXP).

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 126 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1291 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently uses only INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: The HAS is being updated.  Revise the order of chained and base
+ * actions once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry that an
+ * accompanying MOD_PROFILE action uses.  Some MOD_PROFILE actions may need
+ * extra information from a Modify content entry and thus require an
+ * accompanying MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint on whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of 2 chained
+ * action sets.  The chained action set is the first.  The base/parent action
+ * set is the second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
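+/*
+ * Illustrative composition sketch (hypothetical values such as "prec", "pe",
+ * "vsi_id" and "qid" are caller-provided): the factory helpers above are
+ * meant to be collected into an array of action sets that is later packed
+ * into a rule blob.  Each helper returns a NOP encoding when an argument is
+ * out of range.
+ *
+ *	union cpfl_action_set acts[2];
+ *
+ *	acts[0] = cpfl_act_fwd_vsi(0, prec, pe, vsi_id);
+ *	acts[1] = cpfl_act_set_hash_queue(prec, pe, qid, false);
+ */
+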
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common bit context for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the bit context for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max hash collisions */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
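+
+/*
+ * Illustrative use (hypothetical values): the resulting cfg_ctrl word is
+ * passed to cpfl_prep_sem_rule_blob() together with the key and action
+ * bytes, e.g.
+ *
+ *	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id, 0, 0);
+ *	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, &blob);
+ */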
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+	'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 6/8] net/cpfl: add fxp rule module
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (4 preceding siblings ...)
  2023-09-26 18:17           ` [PATCH v7 5/8] net/cpfl: add FXP low level implementation yuying.zhang
@ 2023-09-26 18:17           ` yuying.zhang
  2023-09-28  3:29             ` Zhang, Qi Z
  2023-09-26 18:17           ` [PATCH v7 7/8] net/cpfl: add fxp flow engine yuying.zhang
                             ` (2 subsequent siblings)
  8 siblings, 1 reply; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:17 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Implement FXP rule creation and destruction.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
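Usage sketch (illustrative only; tx_cq/rx_cq and the rinfo array are assumed
to be set up by the caller): rule add and delete go through the same entry
point, which packs each rule into a pre-allocated DMA slot and control queue
message, sends the batch on the tx config queue and polls the rx config queue
for completions.

	/* install a batch of rules */
	ret = cpfl_rule_process(itf, tx_cq, rx_cq, rinfo, rule_num, true);
	if (ret < 0)
		return ret;

	/* the same call with add == false removes them again */
	ret = cpfl_rule_process(itf, tx_cq, rx_cq, rinfo, rule_num, false);
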
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 402 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..da78e79652 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
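+	/* orig_dma backs the whole batch: slot 0 of the allocation is kept as
+	 * the parent region and each dma[i] points at the following
+	 * size-sized chunk inside it.
+	 */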
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an already existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 7/8] net/cpfl: add fxp flow engine
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (5 preceding siblings ...)
  2023-09-26 18:17           ` [PATCH v7 6/8] net/cpfl: add fxp rule module yuying.zhang
@ 2023-09-26 18:17           ` yuying.zhang
  2023-09-26 18:17           ` [PATCH v7 8/8] net/cpfl: add flow support for representor yuying.zhang
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:17 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt a flow engine to the FXP implementation.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
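Usage sketch (illustrative only): a pattern accepted by the JSON parser plus a
QUEUE action is translated by this engine into one SEM rule.  The ETH/IPV4
pattern below is hypothetical and must match a profile in the loaded parser
configuration; "port_id" is a hypothetical vport port id.

	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
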
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 611 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..154af5bd35
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
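+			/* queue_num is verified above to be a power of two,
+			 * so log(queue_num) / log(2) gives the queue region
+			 * size in bits for the SET_Q action.
+			 */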
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v7 8/8] net/cpfl: add flow support for representor
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (6 preceding siblings ...)
  2023-09-26 18:17           ` [PATCH v7 7/8] net/cpfl: add fxp flow engine yuying.zhang
@ 2023-09-26 18:17           ` yuying.zhang
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
  8 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-26 18:17 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for representor, so that a representor can
create, destroy, validate and flush rules.
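
As an illustration, a minimal testpmd sequence exercising the new
representor flow ops could look as follows; the port id, group id and
match fields are hypothetical and depend on the loaded JSON profile and
on which ports end up as representors:

   flow create 2 ingress group 1 pattern eth dst is 00:01:00:00:03:14 / ipv4 / tcp / end \
        actions represented_port ethdev_port_id 0 / end
   flow flush 2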

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                | 19 +++++-
 doc/guides/rel_notes/release_23_11.rst  |  1 +
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 88 ++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++
 4 files changed, 133 insertions(+), 4 deletions(-)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index e17347d15c..ee2fabd99c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -197,8 +197,23 @@ low level hardware resources.
 
     * For Ubuntu, it can be installed using `apt install libjansson-dev`
 
-- run testpmd with the json file
+- run testpmd with the json file, creating two vports and two VF representors
 
    .. code-block:: console
 
-   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],representor=vf[0,1],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from an I/O port to a local (CPF's) vport. The flow should be created on
+   vport X. Group M should match the fxp module. The action port_representor Y means forwarding the packet to local vport Y.
+   To send a packet to a VF representor, the action represented_port can be used::
+
+   .. code-block:: console
+
+   flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end
+
+#. Send a matched packet, and it should be displayed on the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 3d9be208d0..bad71ad3fd 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -81,6 +81,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 154af5bd35..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -257,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -267,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -285,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
@@ -414,6 +434,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,6 +508,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
 	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules
  2023-09-26 18:16           ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
@ 2023-09-26 19:03             ` Stephen Hemminger
  2023-09-27  1:21               ` Zhang, Qi Z
  0 siblings, 1 reply; 128+ messages in thread
From: Stephen Hemminger @ 2023-09-26 19:03 UTC (permalink / raw)
  To: yuying.zhang; +Cc: dev, qi.z.zhang, jingjing.wu, beilei.xing, Wenjing Qiao

On Tue, 26 Sep 2023 18:16:56 +0000
yuying.zhang@intel.com wrote:

> From: Wenjing Qiao <wenjing.qiao@intel.com>
> 
> Add devargs "flow_parser" for rte flow json parser which depends
> on jansson library.
> 
> Example:
>     -a ca:00.0,flow_parser="refpkg.json"
> 
> Add json parser for rte flow pattern rules which can build rules
> that maps from a set of rte flow items to hardware representations.
> 
> The cpfl PMD supports utilizing a JSON configuration file to translate
> rte flow tokens into low level hardware resources. The JSON configuration
> file is provided by the hardware vendor and is intended to work exclusively
> with a specific P4 pipeline configuration, which must be compiled and
> programmed into the hardware.
> 
> The format of the JSON file strictly follows the internal specifications
> of the hardware vendor and is not meant to be modified directly by
> users.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>

It is good to see the code is now using the same JSON parser as elsewhere in DPDK.

How does this interact with the P4 work done by Cristian, is this part of that?
Is this treated as opaque firmware?
Why is it driver specific? DPDK P4 support needs to be vendor neutral to be acceptable.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules
  2023-09-26 19:03             ` Stephen Hemminger
@ 2023-09-27  1:21               ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-27  1:21 UTC (permalink / raw)
  To: Stephen Hemminger, Zhang, Yuying
  Cc: dev, Wu, Jingjing, Xing, Beilei, Qiao, Wenjing, Dumitrescu, Cristian



> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Wednesday, September 27, 2023 3:04 AM
> To: Zhang, Yuying <yuying.zhang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: Re: [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules
> 
> On Tue, 26 Sep 2023 18:16:56 +0000
> yuying.zhang@intel.com wrote:
> 
> > From: Wenjing Qiao <wenjing.qiao@intel.com>
> >
> > Add devargs "flow_parser" for rte flow json parser which depends on
> > jansson library.
> >
> > Example:
> >     -a ca:00.0,flow_parser="refpkg.json"
> >
> > Add json parser for rte flow pattern rules which can build rules that
> > maps from a set of rte flow items to hardware representations.
> >
> > The cpfl PMD supports utilizing a JSON configuration file to translate
> > rte flow tokens into low level hardware resources. The JSON
> > configuration file is provided by the hardware vendor and is intended
> > to work exclusively with a specific P4 pipeline configuration, which
> > must be compiled and programmed into the hardware.
> >
> > The format of the JSON file strictly follows the internal
> > specifications of the hardware vendor and is not meant to be modified
> > directly by users.
> >
> > Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> 
> It is good to see the code is now using the same JSON parser as elsewhere in
> DPDK.
> 
> How does this interact with the P4 work done by Cristian, is this part of that?
> Is this treated as opaque firmware?

This solution is not related to P4 support.

Though the device is configured with P4, there is no P4-aware interface being leveraged here.
The JSON file directs the PMD to translate fixed rte_flow patterns/actions into low-level hardware configuration directly.

The purpose of introducing this solution is to facilitate a smooth migration for certain customers who wish to transition their existing applications from other NICs to IPU.
But, of course, this approach has limitations for customers to fully leverage the capabilities of the P4 device, as not all offloading features can be mapped into fixed functions.

The solution based on Cristian's patch will be enabled in a later release. Both solutions will be available and are mutually exclusive at runtime, depending on the user's intention.

> Why is it driver specific? DPDK P4 support needs to be vendor neutral to
> acceptable.

Even with the P4 solution, the PMD must determine how to map P4 tables and actions into low-level hardware representations. This knowledge is generated by the compiler, and there are a couple of options to enable the PMD to access this knowledge:

1. Embedding this knowledge into a storage space on the hardware, allowing the PMD to learn from the hardware through internal firmware APIs.
2. Storing the knowledge in the file system and having the PMD load this file using devargs.

But, these options are vendor-specific in nature.

Regards
Qi 



^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 0/9] add rte flow support for cpfl
  2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
                             ` (7 preceding siblings ...)
  2023-09-26 18:17           ` [PATCH v7 8/8] net/cpfl: add flow support for representor yuying.zhang
@ 2023-09-27 12:54           ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
                               ` (10 more replies)
  8 siblings, 11 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset adds rte_flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: build action mapping rules from JSON

Yuying Zhang (7):
  net/cpfl: set up rte flow skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
  net/cpfl: add support of to represented port action
---
v8:
* fix compile issues
* refine documentation and separate patches by feature
v7:
* refine commit log
* fix compile issues

v6:
* use existing jansson instead of the json-c library.
* refine "add FXP low level implementation"

V5:
* Add input validation for some functions.

 doc/guides/nics/cpfl.rst                |   52 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1839 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  127 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6485 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
                               ` (9 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" for rte flow json parser which depends
on jansson library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add json parser for rte flow pattern rules which can build rules
that map from a set of rte flow items to hardware representations.

The cpfl PMD supports utilizing a JSON configuration file to translate
rte flow tokens into low level hardware resources. The JSON configuration
file is provided by the hardware vendor and is intended to work exclusively
with a specific P4 pipeline configuration, which must be compiled and
programmed into the hardware.

The format of the JSON file strictly follows the internal specifications
of the hardware vendor and is not meant to be modified directly by
users.
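
For reference, a minimal sketch of the layout the parser added in this
patch accepts (patterns -> key/protocols/attributes -> actions with
"sem" data and field vectors) is shown below. Every name, mask and
number here is purely illustrative and is not taken from a real
refpkg.json:

    {
        "patterns": [
            {
                "key": {
                    "protocols": [
                        { "type": "eth",  "fields": [ { "name": "dst_addr", "mask": "ff:ff:ff:ff:ff:ff" } ] },
                        { "type": "ipv4", "fields": [ { "name": "src_addr", "mask": "255.255.255.255" } ] }
                    ],
                    "attributes": [ { "Name": "ingress", "Value": 1 } ]
                },
                "actions": [
                    { "type": "sem",
                      "data": {
                          "profile": 1, "subprofile": 0, "keysize": 16,
                          "fieldvectors": [
                              { "offset": 0, "type": "immediate", "value": 1 },
                              { "offset": 1, "type": "protocol",
                                "value": { "layer": 0, "header": "ipv4", "offset": 12, "mask": 65535 } }
                          ]
                      } }
                ]
            }
        ]
    }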

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst            |   38 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1303 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1628 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..e17347d15c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,32 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+
+  The JSON configuration file is provided by the hardware vendor and is intended to work
+  exclusively with a specific P4 pipeline configuration, which must be compiled and programmed
+  into the hardware.
+
+  The format of the JSON file strictly follows the internal specifications of the hardware
+  vendor and is not meant to be modified directly by users.
+
+  Using the ``devargs`` option ``flow_parser`` the user can specify the path
+  of a json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the json file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +184,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The cpfl PMD uses a json file to direct the translation of rte_flow tokens into
+low-level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using `apt install libjansson-dev`
+
+- run testpmd with the json file
+
+   .. code-block:: console
+
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..308f9c9736
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1303 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint16_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint32_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst */
+		name = field->name; /* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID\n");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..367a6da574
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 2/9] net/cpfl: build action mapping rules from JSON
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
                               ` (8 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Build rules that map an rte_flow action, vxlan_encap or
vxlan_decap, to its hardware representation.
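
As a rough illustration (not part of this patch), one "modifications" entry
in the flow parser JSON may look like the sketch below; the profile and
layout values are invented, and the standalone jansson walk only mirrors,
in a simplified way, what cpfl_flow_js_mod_rule() and cpfl_flow_js_mr_action()
do when they read those keys:

/*
 * Illustrative "modifications" entry (all values are made up):
 *
 * "modifications": [{
 *     "key": { "actions": [
 *         { "type": "vxlan_encap",
 *           "data": { "protocols": ["eth", "ipv4", "udp", "vxlan"] } } ] },
 *     "action": { "type": "mod",
 *         "data": { "profile": 1,
 *                   "layout": [ { "index": 0, "hint": "eth",
 *                                 "offset": 0, "size": 14 } ] } }
 * }]
 */
#include <stdio.h>
#include <jansson.h>

int main(void)
{
	json_error_t err;
	/* "flow_parser.json" is a placeholder path, not a file from this series */
	json_t *root = json_load_file("flow_parser.json", 0, &err);
	json_t *mrs, *mr, *action, *data, *prof;

	if (!root) {
		fprintf(stderr, "JSON error: %s (line %d)\n", err.text, err.line);
		return 1;
	}
	mrs = json_object_get(root, "modifications");	/* optional array */
	if (mrs && json_array_size(mrs) > 0) {
		mr = json_array_get(mrs, 0);
		action = json_object_get(mr, "action");
		data = json_object_get(action, "data");
		prof = json_object_get(data, "profile");
		if (json_is_integer(prof))
			printf("mod profile %d\n", (int)json_integer_value(prof));
	}
	json_decref(root);
	return 0;
}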

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 538 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 637 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index 308f9c9736..81ccbbd8e5 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -50,6 +62,29 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (int)json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -522,6 +557,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -532,6 +789,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -602,6 +864,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -618,6 +889,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -646,7 +928,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1235,6 +1517,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummpy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 367a6da574..b7bf21bd76 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and include a sequence of protocol headers defined in field protocols
+ * of data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where the data to copy from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that help
+ * the driver compose the MOD memory region when the action needs to insert/update some packet
+ * data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that help the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 3/9] net/cpfl: set up rte flow skeleton
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 4/9] net/cpfl: set up control path yuying.zhang
                               ` (7 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.
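
As a rough illustration (not code from this patch), a backend plugs into this
skeleton by filling in a struct cpfl_flow_engine with its callbacks and
registering itself from an RTE_INIT constructor, so that cpfl_flow_engine_init()
and cpfl_flow_engine_match() can find it later; the dummy_* names below are
placeholders, and the real registration is done by the FXP engine added later
in this series:

#include <errno.h>
#include <rte_common.h>
#include "cpfl_flow.h"

/* placeholder callbacks; a real engine parses the flow and programs hardware */
static int
dummy_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
{
	return 0;
}

static int
dummy_parse_pattern_action(struct rte_eth_dev *dev __rte_unused,
			   const struct rte_flow_attr *attr __rte_unused,
			   const struct rte_flow_item pattern[] __rte_unused,
			   const struct rte_flow_action actions[] __rte_unused,
			   void **meta __rte_unused)
{
	return -ENOTSUP;	/* decline every flow in this sketch */
}

static struct cpfl_flow_engine dummy_engine = {
	.type = CPFL_FLOW_ENGINE_NONE,
	.init = dummy_engine_init,
	.parse_pattern_action = dummy_parse_pattern_action,
};

/* runs at load time, before cpfl_flow_init() walks the registered engines */
RTE_INIT(dummy_engine_register)
{
	cpfl_flow_engine_register(&dummy_engine);
}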

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 4/9] net/cpfl: set up control path
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (2 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 5/9] net/cpfl: add FXP low level implementation yuying.zhang
                               ` (6 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up control vport and control queue for flow offloading.
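
As a rough usage sketch (not code from this patch), a caller describes one
queue to cpfl_ctlq_add() through a cpfl_ctlq_create_info: the CP has already
allocated the DMA ring (and, for RX queues, the buffer region), so the struct
only carries those addresses plus the ring geometry and the queue registers.
The helper name and the register offsets below are invented for illustration:

#include <string.h>
#include "cpfl_controlq.h"

/* hypothetical helper: ring_mem is DMA memory handed out by the CP, with
 * ring_mem->size == CPFL_CFGQ_RING_LEN * sizeof(struct idpf_ctlq_desc)
 */
static int
example_add_cfg_txq(struct idpf_hw *hw, struct idpf_dma_mem *ring_mem,
		    struct idpf_ctlq_info **cq)
{
	struct cpfl_ctlq_create_info qinfo;

	memset(&qinfo, 0, sizeof(qinfo));
	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_TX;
	qinfo.id = 0;				/* queue id assigned by the CP */
	qinfo.len = CPFL_CFGQ_RING_LEN;		/* number of descriptors */
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	qinfo.ring_mem = *ring_mem;
	qinfo.reg.tail = 0x1000;		/* placeholder register offsets */
	qinfo.reg.head = 0x1004;
	qinfo.reg.len = 0x1008;

	return cpfl_ctlq_add(hw, &qinfo, cq);
}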

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1305 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..4a925bc338
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EINVAL;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EINVAL;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  CP allocates one big chunk of DMA
+		 * region whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EINVAL;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (!cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		desc->cookie_high =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
+		desc->cookie_low =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
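+
+/* Example usage (illustrative only, not part of this patch; "opcode" and
+ * "payload" are placeholders, the latter being a caller-owned idpf_dma_mem
+ * that already holds the command data):
+ *
+ *	struct idpf_ctlq_msg msg = { 0 };
+ *
+ *	msg.opcode = opcode;
+ *	msg.data_len = payload->size;
+ *	msg.ctx.indirect.payload = payload;
+ *	err = cpfl_ctlq_send(hw, cq, 1, &msg);
+ *
+ * The DMA buffer stays owned by the caller until the corresponding completion
+ * is reclaimed with cpfl_ctlq_clean_sq().
+ */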
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
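+
+/* Example usage (illustrative only, not part of this patch;
+ * CPFL_CTLQ_MAX_CLEAN and handle_failed_message() are placeholders):
+ *
+ *	struct idpf_ctlq_msg *done[CPFL_CTLQ_MAX_CLEAN];
+ *	uint16_t num = CPFL_CTLQ_MAX_CLEAN;
+ *	uint16_t i;
+ *
+ *	err = cpfl_ctlq_clean_sq(cq, &num, done);
+ *	for (i = 0; i < num; i++)
+ *		if (done[i]->status)
+ *			handle_failed_message(done[i]);
+ *
+ * On return, num holds the number of descriptors actually cleaned and each
+ * entry of done[] points to the original message passed to cpfl_ctlq_send().
+ */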
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. with buff_count = 0
+ * and buffs = NULL, in order to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to the end of the ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
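+
+/* Example polling sequence (illustrative only, not part of this patch,
+ * assuming every received message carries an indirect payload):
+ *
+ *	struct idpf_ctlq_msg msg[CPFL_CTLQ_BATCH];
+ *	struct idpf_dma_mem *bufs[CPFL_CTLQ_BATCH];
+ *	uint16_t num = CPFL_CTLQ_BATCH;
+ *	uint16_t i;
+ *
+ *	ret = cpfl_ctlq_recv(cq, &num, msg);
+ *	for (i = 0; i < num; i++)
+ *		bufs[i] = msg[i].ctx.indirect.payload;
+ *	cpfl_ctlq_post_rx_buffs(hw, cq, &num, bufs);
+ *
+ * CPFL_CTLQ_BATCH is a placeholder batch size.  Buffers are returned to the
+ * ring with cpfl_ctlq_post_rx_buffs() even when num is 0, as required by the
+ * note above.
+ */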
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif /* _CPFL_CONTROLQ_H_ */
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
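+	/* Config queues are interleaved in ctlqp[]/cfgq_info[]: even entries
+	 * are Tx, odd entries are Rx (see cpfl_cfgq_setup()).
+	 */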
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
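+	/* Even entries of ctlqp[]/cfgq_info[] hold the Tx config queues
+	 * (see cpfl_cfgq_setup()).
+	 */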
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 5/9] net/cpfl: add FXP low level implementation
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (3 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 4/9] net/cpfl: set up control path yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 6/9] net/cpfl: add fxp rule module yuying.zhang
                               ` (5 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add the low-level implementation for the CPFL PMD to create/delete
rules on the IPU's Flexible Packet Processor (FXP).

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 127 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1292 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both the A and B
+ * fields must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: The HAS is being updated.  Revise the order of the chained and base
+ * actions once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update
+ * existing metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two
+ * consecutive action sets: the chained AUX action set comes first and the
+ * base/parent action set comes second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..3d259d3da8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - build the common context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - build the context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
+	rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+ /* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+	'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 6/9] net/cpfl: add fxp rule module
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (4 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 5/9] net/cpfl: add FXP low level implementation yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 7/9] net/cpfl: add fxp flow engine yuying.zhang
                               ` (4 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Implement FXP rule creation / destruction.
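
Below is a minimal, illustrative sketch (not part of the patch) of how a
caller is expected to use the new helpers. The example_add_one_rule()
wrapper and its arguments are hypothetical; only cpfl_rule_process(),
struct cpfl_rule_info and the headers come from this patch:

	#include "cpfl_ethdev.h"
	#include "cpfl_fxp_rule.h"

	/* Hypothetical wrapper: program one already-filled SEM rule through
	 * the given control queue pair and return the hardware status.
	 */
	static int
	example_add_one_rule(struct cpfl_itf *itf,
			     struct idpf_ctlq_info *tx_cq,
			     struct idpf_ctlq_info *rx_cq,
			     struct cpfl_rule_info *rinfo)
	{
		/* cpfl_rule_process() packs the rule into the interface's
		 * DMA buffers, posts it on the tx control queue and polls
		 * the rx control queue for the completion status.
		 */
		return cpfl_rule_process(itf, tx_cq, rx_cq, rinfo, 1, true);
	}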

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 402 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule conflicts with an existing one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already been deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 7/9] net/cpfl: add fxp flow engine
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (5 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 6/9] net/cpfl: add fxp rule module yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 8/9] net/cpfl: add flow support for representor yuying.zhang
                               ` (3 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt a flow engine to the FXP implementation.
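
The sketch below (illustrative only, not part of the patch) shows how the
generic cpfl_flow layer is expected to drive this engine once it has been
registered through cpfl_flow_engine_register(); the engine, dev, attr,
pattern, actions, flow and error variables are paraphrased from the flow
framework and are hypothetical here:

	void *meta = NULL;
	int ret;

	/* the fxp engine checks the pattern/actions against the JSON parser
	 * and builds a cpfl_rule_info_meta describing the SEM rule (plus an
	 * optional MOD rule for encap/decap actions)
	 */
	ret = engine->parse_pattern_action(dev, attr, pattern, actions, &meta);
	if (ret == 0)
		/* programs the rules through the vport's control queue pair */
		ret = engine->create(dev, flow, meta, error);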

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                |  18 +-
 doc/guides/rel_notes/release_23_11.rst  |   1 +
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 5 files changed, 628 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index e17347d15c..ae5487f2f6 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -197,8 +197,22 @@ low level hardware resources.
 
     * For Ubuntu, it can be installed using `apt install libjansson-dev`
 
-- run testpmd with the json file
+- run testpmd with the json file and create two vports
 
    .. code-block:: console
 
-   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from an I/O port to a local (CPF's) vport. The flow should be created
+   on vport X. Group M should match the fxp module. Action port_representor Y means forwarding the packet to local vport Y::
+
+   .. code-block:: console
+
+   flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end
+
+#. Send a matched packet, and it should be displayed on the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 3d9be208d0..bad71ad3fd 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -81,6 +81,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 Removed Items
 -------------
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..154af5bd35
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "The pattern is not supported.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 8/9] net/cpfl: add flow support for representor
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (6 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 7/9] net/cpfl: add fxp flow engine yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-27 12:54             ` [PATCH v8 9/9] net/cpfl: add support of to represented port action yuying.zhang
                               ` (2 subsequent siblings)
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for representor, so that a representor port can
create, destroy, validate and flush rules.
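
For illustration, a minimal application-side sketch (function name and
port id are placeholders; whether a given pattern/action combination is
accepted depends on the JSON configuration loaded via the flow_parser
devargs) of driving these ops on a representor port through the generic
rte_flow API:

#include <rte_flow.h>

static int
repr_drop_ipv4(uint16_t repr_port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	/* validate first, then create the rule on the representor port */
	if (rte_flow_validate(repr_port_id, &attr, pattern, actions, &err) != 0)
		return -1;
	flow = rte_flow_create(repr_port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -1;
	/* later: rte_flow_destroy(repr_port_id, flow, &err) or
	 * rte_flow_flush(repr_port_id, &err)
	 */
	return 0;
}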

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 74 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 154af5bd35..c460e6b5c6 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -414,6 +424,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,6 +498,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
 	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v8 9/9] net/cpfl: add support of to represented port action
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (7 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 8/9] net/cpfl: add flow support for representor yuying.zhang
@ 2023-09-27 12:54             ` yuying.zhang
  2023-09-28  3:37             ` [PATCH v8 0/9] add rte flow support for cpfl Zhang, Qi Z
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
  10 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-27 12:54 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add support of the 'to represented port' action
(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) for forwarding
packets to APF/CPF/VF representors.
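
As an illustration (the ethdev port id is a placeholder, not part of
this patch), an application requests such forwarding with an action
list like:

#include <rte_flow.h>

/* assumption: ethdev port 2 is a representor created by this PMD */
static struct rte_flow_action_ethdev to_port = { .port_id = 2 };
static struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &to_port },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};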

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index c460e6b5c6..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -267,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -277,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -295,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v7 6/8] net/cpfl: add fxp rule module
  2023-09-26 18:17           ` [PATCH v7 6/8] net/cpfl: add fxp rule module yuying.zhang
@ 2023-09-28  3:29             ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-28  3:29 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Wednesday, September 27, 2023 2:17 AM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v7 6/8] net/cpfl: add fxp rule module
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> Implement FXP rule creation / destroying.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
>  drivers/net/cpfl/cpfl_ethdev.h   |   6 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
>  drivers/net/cpfl/meson.build     |   1 +
>  5 files changed, 402 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index a2bc6784d0..da78e79652 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -16,6 +16,7 @@
>  #include <ethdev_private.h>
>  #include "cpfl_rxtx.h"
>  #include "cpfl_flow.h"
> +#include "cpfl_rules.h"
> 
>  #define CPFL_REPRESENTOR	"representor"
>  #define CPFL_TX_SINGLE_Q	"tx_single"
> @@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
>  	adapter->cur_vport_nb--;
>  	dev->data->dev_private = NULL;
>  	adapter->vports[vport->sw_idx] = NULL;
> +	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
>  	rte_free(cpfl_vport);
> 
>  	return 0;
> @@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport
> *cpfl_vport,
>  	return 0;
>  }
> 
> +int
> +cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct
> idpf_dma_mem *dma, uint32_t size,
> +			 int batch_size)
> +{
> +	int i;
> +
> +	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
> +		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < batch_size; i++) {
> +		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
> +		dma[i].pa = orig_dma->pa + size * (i + 1);
> +		dma[i].size = size;
> +		dma[i].zone = NULL;
> +	}
> +	return 0;
> +}
> +
>  static int
>  cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)  { @@ -
> 2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
>  	rte_ether_addr_copy((struct rte_ether_addr *)vport-
> >default_mac_addr,
>  			    &dev->data->mac_addrs[0]);
> 
> +	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
> +	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
> +	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
> +				       cpfl_vport->itf.dma,
> +				       sizeof(union cpfl_rule_cfg_pkt_record),
> +				       CPFL_FLOW_BATCH_SIZE);
> +	if (ret < 0)
> +		goto err_mac_addrs;
> +
>  	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
>  		memset(&p2p_queue_grps_info, 0,
> sizeof(p2p_queue_grps_info));
>  		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> p2p_q_vc_out_info); diff --git a/drivers/net/cpfl/cpfl_ethdev.h
> b/drivers/net/cpfl/cpfl_ethdev.h index 7f83d170d7..8eeeac9910 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -147,10 +147,14 @@ enum cpfl_itf_type {
> 
>  TAILQ_HEAD(cpfl_flow_list, rte_flow);
> 
> +#define CPFL_FLOW_BATCH_SIZE  490
>  struct cpfl_itf {
>  	enum cpfl_itf_type type;
>  	struct cpfl_adapter_ext *adapter;
>  	struct cpfl_flow_list flow_list;
> +	struct idpf_dma_mem flow_dma;
> +	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> +	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
>  	void *data;
>  };
> 
> @@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext
> *adapter,  int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);  int
> cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);  int
> cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
> +int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct
> idpf_dma_mem *dma,
> +			     uint32_t size, int batch_size);
> 
>  #define CPFL_DEV_TO_PCI(eth_dev)		\
>  	RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..50fac55432
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,296 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +#include "cpfl_ethdev.h"
> +
> +#include "cpfl_fxp_rule.h"
> +#include "cpfl_logs.h"
> +
> +#define CTLQ_SEND_RETRIES 100
> +#define CTLQ_RECEIVE_RETRIES 100
> +
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16
> num_q_msg,
> +		   struct idpf_ctlq_msg q_msg[])
> +{
> +	struct idpf_ctlq_msg **msg_ptr_list;
> +	u16 clean_count = 0;
> +	int num_cleaned = 0;
> +	int retries = 0;
> +	int ret = 0;
> +
> +	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
> +	if (!msg_ptr_list) {
> +		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
> +	if (ret) {
> +		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with
> error: 0x%4x", ret);
> +		goto send_err;
> +	}
> +
> +	while (retries <= CTLQ_SEND_RETRIES) {
> +		clean_count = num_q_msg - num_cleaned;
> +		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
> +					       &msg_ptr_list[num_cleaned]);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
> +			goto send_err;
> +		}
> +
> +		num_cleaned += clean_count;
> +		retries++;
> +		if (num_cleaned >= num_q_msg)
> +			break;
> +		rte_delay_us_sleep(10);
> +	}
> +
> +	if (retries > CTLQ_SEND_RETRIES) {
> +		PMD_INIT_LOG(ERR, "timed out while polling for
> completions");
> +		ret = -1;
> +		goto send_err;
> +	}
> +
> +send_err:
> +	if (msg_ptr_list)
> +		free(msg_ptr_list);
> +err:
> +	return ret;
> +}
> +
> +static int
> +cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg) {
> +	u16 i;
> +
> +	if (!num_q_msg || !q_msg)
> +		return -EINVAL;
> +
> +	for (i = 0; i < num_q_msg; i++) {
> +		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
> +			continue;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
> +			PMD_INIT_LOG(ERR, "The rule has confliction with
> already existed one");
> +			return -EINVAL;
> +		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
> +			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
> +			PMD_INIT_LOG(ERR, "The rule has already deleted");
> +			return -EINVAL;
> +		} else {
> +			PMD_INIT_LOG(ERR, "Invalid rule");
> +			return -EINVAL;
> +		}

Please fix checkpatch warning due to unnecessary else.
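
For reference, dropping the else after each return/continue keeps the
same logic (untested sketch, log texts lightly tidied):

	for (i = 0; i < num_q_msg; i++) {
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK)
			continue;
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
		    q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
			PMD_INIT_LOG(ERR, "The rule conflicts with an existing one");
			return -EINVAL;
		}
		if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
		    q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
			PMD_INIT_LOG(ERR, "The rule has already been deleted");
			return -EINVAL;
		}
		PMD_INIT_LOG(ERR, "Invalid rule");
		return -EINVAL;
	}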



^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v8 0/9] add rte flow support for cpfl
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (8 preceding siblings ...)
  2023-09-27 12:54             ` [PATCH v8 9/9] net/cpfl: add support of to represented port action yuying.zhang
@ 2023-09-28  3:37             ` Zhang, Qi Z
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
  10 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-28  3:37 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Wednesday, September 27, 2023 8:54 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v8 0/9] add rte flow support for cpfl
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> This patchset add rte flow support for cpfl driver.
> It depends on the following patch set:
> http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-
> beilei.xing@intel.com/
> 
> Wenjing Qiao (2):
>   net/cpfl: add json parser for rte flow pattern rules
>   net/cpfl: build action mapping rules from JSON
> 
> Yuying Zhang (7):
>   net/cpfl: set up rte flow skeleton
>   net/cpfl: set up control path
>   net/cpfl: add FXP low level implementation
>   net/cpfl: add fxp rule module
>   net/cpfl: add fxp flow engine
>   net/cpfl: add flow support for representor
>   net/cpfl: add support of to represented port action
> ---
> v8:
> * fix compile issues
> * refine document and separate patch with different features
> v7:
> * refine commit log
> * fix compile issues
> 
> v6:
> * use existed jansson instead of json-c library.
> * refine "add FXP low level implementation"
> 
> V5:
> * Add input validation for some functions.
> 
>  doc/guides/nics/cpfl.rst                |   52 +
>  doc/guides/rel_notes/release_23_11.rst  |    1 +
>  drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
>  drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
>  drivers/net/cpfl/cpfl_controlq.h        |   75 +
>  drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
>  drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
>  drivers/net/cpfl/cpfl_flow.c            |  339 +++++
>  drivers/net/cpfl/cpfl_flow.h            |   85 ++
>  drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
>  drivers/net/cpfl/cpfl_flow_parser.c     | 1839 +++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.c        |  296 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
>  drivers/net/cpfl/cpfl_representor.c     |   29 +
>  drivers/net/cpfl/cpfl_rules.c           |  127 ++
>  drivers/net/cpfl/cpfl_rules.h           |  306 ++++
>  drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
>  drivers/net/cpfl/meson.build            |   12 +
>  19 files changed, 6485 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/cpfl/cpfl_actions.h  create mode 100644
> drivers/net/cpfl/cpfl_controlq.c  create mode 100644
> drivers/net/cpfl/cpfl_controlq.h  create mode 100644
> drivers/net/cpfl/cpfl_flow.c  create mode 100644 drivers/net/cpfl/cpfl_flow.h
> create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h  create mode 100644
> drivers/net/cpfl/cpfl_rules.c  create mode 100644 drivers/net/cpfl/cpfl_rules.h
> 
> --
> 2.34.1

Please fix one checkpatch warning on PATCH 6 and also rebase to the latest dpdk-next-net-intel in the new version.

Otherwise Acked-by: Qi Zhang <qi.z.zhang@intel.com>




^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 0/9] add rte flow support for cpfl
  2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
                               ` (9 preceding siblings ...)
  2023-09-28  3:37             ` [PATCH v8 0/9] add rte flow support for cpfl Zhang, Qi Z
@ 2023-09-28  8:44             ` yuying.zhang
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
                                 ` (13 more replies)
  10 siblings, 14 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset adds rte flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: add json parser for rte flow pattern rules
  net/cpfl: build action mapping rules from JSON

Yuying Zhang (7):
  net/cpfl: set up rte flow skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: add fxp rule module
  net/cpfl: add fxp flow engine
  net/cpfl: add flow support for representor
  net/cpfl: add support of to represented port action
---
v9:
* refine rx queue message process function

v8:
* fix compile issues
* refine document and separate patch with different features

v7:
* refine commit log
* fix compile issues

v6:
* use existed jansson instead of json-c library.
* refine "add FXP low level implementation"

v5:
* Add input validation for some functions.

 doc/guides/nics/cpfl.rst                |   52 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1839 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  263 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  127 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6452 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 1/9] net/cpfl: add json parser for rte flow pattern rules
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
                                 ` (11 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add the devargs "flow_parser" for the rte flow JSON parser, which
depends on the jansson library.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

Add a JSON parser for rte flow pattern rules which can build rules
that map a set of rte flow items to hardware representations.

The cpfl PMD supports utilizing a JSON configuration file to translate
rte flow tokens into low level hardware resources. The JSON configuration
file is provided by the hardware vendor and is intended to work exclusively
with a specific P4 pipeline configuration, which must be compiled and
programmed into the hardware.

The format of the JSON file strictly follows the internal specifications
of the hardware vendor and is not meant to be modified directly by
users.
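
A minimal sketch, assuming the helpers introduced below, of how the
parser is expected to be created and destroyed from the devargs value
(the wrapper function name is hypothetical):

#include "cpfl_ethdev.h"
#include "cpfl_flow_parser.h"

static int
cpfl_load_flow_parser_example(struct cpfl_devargs *cpfl_args,
			      struct cpfl_flow_js_parser **parser)
{
	int ret;

	/* flow_parser[0] == '\0' means the devargs was not given */
	if (cpfl_args->flow_parser[0] == '\0')
		return 0;

	ret = cpfl_parser_create(parser, cpfl_args->flow_parser);
	if (ret < 0)
		return ret;

	/* on teardown: cpfl_parser_destroy(*parser); */
	return 0;
}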

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 doc/guides/nics/cpfl.rst            |   38 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1303 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  167 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1628 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..e17347d15c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,32 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+
+  The JSON configuration file is provided by the hardware vendor and is intended to work
+  exclusively with a specific P4 pipeline configuration, which must be compiled and programmed
+  into the hardware.
+
+  The format of the JSON file strictly follows the internal specifications of the hardware
+  vendor and is not meant to be modified directly by users.
+
+  Using the ``devargs`` option ``flow_parser`` the user can specify the path
+  of a json file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the json file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +184,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The cpfl PMD uses a json file to translate rte_flow tokens into
+low level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using `apt install libjansson-dev`
+
+- Run testpmd with the json file
+
+   .. code-block:: console
+
+      dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parse flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is driver's responsibility to simlulate a metadata buffer which
+ * can be used as data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..308f9c9736
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1303 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint16_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint32_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'type'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* map the JSON field name to the corresponding item mask */
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Unsupported item type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress attribute does not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress attribute does not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..367a6da574
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile / key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	rte_memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 2/9] net/cpfl: build action mapping rules from JSON
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
  2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
  2023-09-28  8:44               ` [PATCH v9 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
                                 ` (10 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Build rules that map an rte_flow vxlan_encap or vxlan_decap
action to its hardware representation.
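
For reference, below is a minimal application-side sketch of the kind of
action list these rules are matched against. It is not part of this patch;
the header values, VNI and variable names are illustrative placeholders, and
the usual rte_flow / rte_ether / netinet headers are assumed:

    /* encapsulation headers supplied by the application through the
     * vxlan_encap action's definition list
     */
    struct rte_flow_item_eth enc_eth = {
        .hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
    };
    struct rte_flow_item_ipv4 enc_ipv4 = { .hdr.next_proto_id = IPPROTO_UDP };
    struct rte_flow_item_udp enc_udp = { .hdr.dst_port = RTE_BE16(4789) };
    /* VNI 100 sits in the upper 24 bits of vx_vni */
    struct rte_flow_item_vxlan enc_vxlan = { .hdr.vx_vni = RTE_BE32(100 << 8) };
    struct rte_flow_item definition[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &enc_eth },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &enc_ipv4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &enc_udp },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_vxlan_encap encap = { .definition = definition };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &encap },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

cpfl_flow_parse_actions() walks such an action array against the JSON
"modifications" rules and, on a match, composes the MOD memory region from
the item specs according to the rule's layout hints.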

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 538 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 637 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index 308f9c9736..81ccbbd8e5 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -32,6 +32,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Unsupported action type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -50,6 +62,29 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (int)json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -522,6 +557,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "unsupported action type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "unsupported action type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications object is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -532,6 +789,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -602,6 +864,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -618,6 +889,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -646,7 +928,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1235,6 +1517,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols do not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols do not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols do not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Unsupported modification action type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications object is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 367a6da574..b7bf21bd76 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -105,9 +105,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the protocols
+ * field of data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where to copy the data from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints that helps
+ * the driver compose the MOD memory region when the action needs to insert/update some
+ * packet data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that helps the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow modification
+ * actions. Each rule is described by a pair of key and action.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -125,6 +195,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -132,6 +229,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 3/9] net/cpfl: set up rte flow skeleton
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (2 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-10-15 13:01                 ` Thomas Monjalon
  2023-09-28  8:44               ` [PATCH v9 4/9] net/cpfl: set up control path yuying.zhang
                                 ` (9 subsequent siblings)
  13 siblings, 1 reply; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as the rte_flow backend and bridge
the rte_flow driver API to the flow engines.
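
As a rough sketch of how a backend engine is expected to hook into this
skeleton (illustrative only, not part of this patch; the dummy_* names are
placeholders, while struct cpfl_flow_engine, CPFL_FLOW_ENGINE_NONE and
cpfl_flow_engine_register() are introduced below):

    /* illustrative stubs; a real engine (e.g. the FXP engine added later
     * in this series) implements parse/create/destroy/query callbacks
     */
    static int
    dummy_engine_init(struct cpfl_adapter_ext *ad)
    {
        RTE_SET_USED(ad);
        return 0;
    }

    static int
    dummy_engine_parse(struct rte_eth_dev *dev,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       void **meta)
    {
        RTE_SET_USED(dev); RTE_SET_USED(attr); RTE_SET_USED(pattern);
        RTE_SET_USED(actions); RTE_SET_USED(meta);
        return -ENOTSUP; /* claim nothing, so matching falls through */
    }

    static struct cpfl_flow_engine dummy_engine = {
        .type = CPFL_FLOW_ENGINE_NONE,
        .init = dummy_engine_init,
        .parse_pattern_action = dummy_engine_parse,
    };

    RTE_INIT(dummy_engine_register)
    {
        cpfl_flow_engine_register(&dummy_engine);
    }

cpfl_flow_engine_match() iterates the registered engines and picks the first
one whose parse_pattern_action() accepts the pattern/actions; the generic
cpfl_flow_ops then dispatch create/destroy/query to that engine.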

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only the vport supports rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 4/9] net/cpfl: set up control path
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (3 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 5/9] net/cpfl: add FXP low level implementation yuying.zhang
                                 ` (8 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up control vport and control queue for flow offloading.
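
As a rough illustration of the queue-creation parameters this patch validates
(not part of this patch; ring_va/ring_pa/buf_va/buf_pa are placeholders for
the DMA regions handed out by the CP), the info below would satisfy
cpfl_check_dma_mem_parameters() for a config RX queue:

    struct cpfl_ctlq_create_info qinfo = { 0 };

    qinfo.type = IDPF_CTLQ_TYPE_CONFIG_RX;
    qinfo.len = CPFL_CFGQ_RING_LEN;
    /* descriptor ring: one idpf_ctlq_desc per ring entry */
    qinfo.ring_mem.va = ring_va;
    qinfo.ring_mem.pa = ring_pa;
    qinfo.ring_mem.size = qinfo.len * sizeof(struct idpf_ctlq_desc);
    /* receive buffers: one fixed-size chunk per config queue entry */
    qinfo.buf_mem.va = buf_va;
    qinfo.buf_mem.pa = buf_pa;
    qinfo.buf_mem.size = CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE;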

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1305 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..4a925bc338
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that the DMA parameters of each DMA memory struct are present and
+ * consistent with the control queue parameters
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EINVAL;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EINVAL;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EINVAL;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	if (cq->bi.rx_buff)
+		idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-mailbox control queues, only the TAIL register needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear the head for both send and receive queues */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		desc->cookie_high =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
+		desc->cookie_low =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to the end of the ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue messages
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any controq queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFLib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
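+	/* Config queues are created as Tx/Rx pairs: even indices hold the Tx
+	 * config queues and odd indices hold the Rx config queues.
+	 */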
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
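+		/* Rx config queues occupy the odd entries of ctlqp[]/cfgq_info[],
+		 * interleaved with the Tx config queues at the even entries.
+		 */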
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
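+		/* Tx config queues occupy the even entries of ctlqp[]/cfgq_info[]. */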
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 5/9] net/cpfl: add FXP low level implementation
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (4 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 4/9] net/cpfl: set up control path yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 6/9] net/cpfl: add fxp rule module yuying.zhang
                                 ` (7 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add the low-level implementation used by the CPFL PMD to create and
delete rules on the IPU's Flexible Packet Processor (FXP).
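
(Illustration only, not part of the patch: one way the encoding macros below
could be combined.  The precedence value and VSI id are arbitrary example
numbers.)

/* Sketch: encode a 16-bit "forward to VSI" action word using the macros
 * from cpfl_actions.h.
 */
union cpfl_action_set act;

act.data = CPFL_ACT_MAKE_16B(1 /* prec */, CPFL_ACT_16B_INDEX_SET_VSI,
			     CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI,
						       CPFL_PE_LAN, 5));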

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 127 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1292 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless two 8-bit actions are combined into one action set, both the A and B
+ * fields must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: the HAS is being updated.  Revise the order of chained and base actions
+ * once the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structures with IDs
+ * from 0 to 31, while the non-chained SET_MD variants can only update existing
+ * metadata IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two
+ * consecutive action sets: the chained AUX action set comes first and the
+ * base/parent action set second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..3d259d3da8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get the context bits for a rule descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
+	rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+ /* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: the key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+        'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 6/9] net/cpfl: add fxp rule module
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (5 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 5/9] net/cpfl: add FXP low level implementation yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 7/9] net/cpfl: add fxp flow engine yuying.zhang
                                 ` (6 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Implement FXP rule creation / destroying.
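
The module packs a cpfl_rule_info into a control queue message
(cpfl_rule_pack), posts the message batch on the tx control queue
(cpfl_send_ctlq_msg) and polls the rx control queue for the completion
(cpfl_receive_ctlq_msg); cpfl_rule_process() drives the whole sequence.

Below is a minimal caller sketch, not part of this patch: it assumes the
per-interface DMA/message buffers have already been allocated by
cpfl_dev_vport_init(), that the SEM key was encoded by the flow parser
(key_len <= CPFL_MAX_KEY_LEN), and that the cookie, profile id and
precedence values are illustrative only.

	#include <string.h>
	#include "cpfl_ethdev.h"
	#include "cpfl_fxp_rule.h"

	static int
	example_add_sem_rule(struct cpfl_itf *itf,
			     struct idpf_ctlq_info *tx_cq,
			     struct idpf_ctlq_info *rx_cq,
			     const uint8_t *key, uint8_t key_len,
			     uint16_t vsi_id)
	{
		struct cpfl_rule_info rinfo = {0};
		union cpfl_action_set *act = (union cpfl_action_set *)rinfo.act_bytes;

		rinfo.type = CPFL_RULE_TYPE_SEM;
		rinfo.cookie = 0x1000;			/* any unique cookie */
		rinfo.sem.prof_id = 1;			/* illustrative profile id */
		rinfo.sem.key_byte_len = key_len;	/* key encoded by the parser */
		memcpy(rinfo.sem.key, key, key_len);

		/* single action: forward to vsi_id (slot 0, precedence 1) */
		act[0] = cpfl_act_fwd_vsi(0, 1, 0, vsi_id);
		rinfo.act_byte_len = sizeof(union cpfl_action_set);

		/* pack, post on tx_cq, then poll rx_cq for the completion */
		return cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
	}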

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 263 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 ++++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 369 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..ea65e20507
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		/* TODO - process rx controlq message */
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not posted recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 7/9] net/cpfl: add fxp flow engine
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (6 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 6/9] net/cpfl: add fxp rule module yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 8/9] net/cpfl: add flow support for representor yuying.zhang
                                 ` (5 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt a flow engine to FXP implementation.
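
The engine translates the rte_flow patterns and actions recognized by the
json parser into SEM/MOD rules and programs them through the vport's
control queue pair.

A minimal application-side sketch, mirroring the documented testpmd
command (not part of this patch; port ids, group number and MAC address
are illustrative, and whether a given pattern/group is accepted depends
on the flow_parser json):

	#include <rte_flow.h>
	#include <rte_ethdev.h>

	static struct rte_flow *
	example_create_fwd_flow(uint16_t port_id, uint16_t dst_port_id)
	{
		struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
		struct rte_flow_item_eth eth_spec = {
			.hdr.dst_addr.addr_bytes = { 0x00, 0x01, 0x00, 0x00, 0x03, 0x14 },
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_ethdev port = { .port_id = dst_port_id };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, .conf = &port },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;

		/* the fxp engine maps this to a SEM rule with a fwd-to-VSI action */
		return rte_flow_create(port_id, &attr, pattern, actions, &err);
	}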

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                |  18 +-
 doc/guides/rel_notes/release_23_11.rst  |   1 +
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 583 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 5 files changed, 628 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index e17347d15c..ae5487f2f6 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -197,8 +197,22 @@ low level hardware resources.
 
     * For Ubuntu, it can be installed using `apt install libjansson-dev`
 
-- run testpmd with the json file
+- run testpmd with the json file and create two vports
 
    .. code-block:: console
 
-   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from the I/O port to a local (CPF's) vport. The flow should be created
+   on vport X. Group M should match the fxp module. Action port_representor Y forwards the packet to local vport Y:
+
+   .. code-block:: console
+
+   flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end
+
+#. Send a matching packet; it should be received and displayed by the PMD:
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 8536ce88f4..16cdd674d3 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -85,6 +85,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 * **Updated Intel iavf driver.**
   * Added support for iavf auto-reset.
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data not be allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..154af5bd35
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_memcpy.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parse detailed rule information with json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the tx queue and the odd index is the rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow create by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 8/9] net/cpfl: add flow support for representor
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (7 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 7/9] net/cpfl: add fxp flow engine yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28  8:44               ` [PATCH v9 9/9] net/cpfl: add support of to represented port action yuying.zhang
                                 ` (4 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow support for the representor, so that a representor port can
create, destroy, validate and flush rules.
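
As an illustration only: once a JSON profile defining a matching pattern
has been loaded through the flow_parser devarg, rules can be driven from
testpmd on a representor port. The port ids, the group and the pattern
below are hypothetical and must match what the loaded profile supports:

    testpmd> flow create 1 ingress group 1 pattern eth / ipv4 / end actions port_representor port_id 0 / end
    testpmd> flow flush 1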

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 74 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 154af5bd35..c460e6b5c6 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -73,6 +73,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -83,6 +84,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -122,6 +127,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -135,6 +141,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -414,6 +424,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -430,6 +498,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
 	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v9 9/9] net/cpfl: add support of to represented port action
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (8 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 8/9] net/cpfl: add flow support for representor yuying.zhang
@ 2023-09-28  8:44               ` yuying.zhang
  2023-09-28 12:45               ` [PATCH v9 0/9] add rte flow support for cpfl Zhang, Qi Z
                                 ` (3 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: yuying.zhang @ 2023-09-28  8:44 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add support for the represented port action, used to forward
packets to APF/CPF/VF representors.
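
A hedged testpmd sketch of the new action (port ids and the pattern are
hypothetical; the pattern must match one defined in the loaded JSON
profile):

    testpmd> flow create 0 ingress group 1 pattern eth / ipv4 / udp / end actions represented_port ethdev_port_id 2 / end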

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index c460e6b5c6..fed18d8349 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -267,6 +267,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -277,6 +278,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -295,12 +297,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v9 0/9] add rte flow support for cpfl
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (9 preceding siblings ...)
  2023-09-28  8:44               ` [PATCH v9 9/9] net/cpfl: add support of to represented port action yuying.zhang
@ 2023-09-28 12:45               ` Zhang, Qi Z
  2023-09-28 16:04               ` Stephen Hemminger
                                 ` (2 subsequent siblings)
  13 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-09-28 12:45 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Thursday, September 28, 2023 4:45 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v9 0/9] add rte flow support for cpfl
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> This patchset add rte flow support for cpfl driver.
> It depends on the following patch set:
> http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-
> beilei.xing@intel.com/
> 
> Wenjing Qiao (2):
>   net/cpfl: add json parser for rte flow pattern rules
>   net/cpfl: build action mapping rules from JSON
> 
> Yuying Zhang (7):
>   net/cpfl: set up rte flow skeleton
>   net/cpfl: set up control path
>   net/cpfl: add FXP low level implementation
>   net/cpfl: add fxp rule module
>   net/cpfl: add fxp flow engine
>   net/cpfl: add flow support for representor
>   net/cpfl: add support of to represented port action
> ---
> v9:
> * refine rx queue message process function
> 
> v8:
> * fix compile issues
> * refine document and separate patch with different features
> 
> v7:
> * refine commit log
> * fix compile issues
> 
> v6:
> * use existed jansson instead of json-c library.
> * refine "add FXP low level implementation"
> 
> V5:
> * Add input validation for some functions.
> 
>  doc/guides/nics/cpfl.rst                |   52 +
>  doc/guides/rel_notes/release_23_11.rst  |    1 +
>  drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
>  drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
>  drivers/net/cpfl/cpfl_controlq.h        |   75 +
>  drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
>  drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
>  drivers/net/cpfl/cpfl_flow.c            |  339 +++++
>  drivers/net/cpfl/cpfl_flow.h            |   85 ++
>  drivers/net/cpfl/cpfl_flow_engine_fxp.c |  667 ++++++++
>  drivers/net/cpfl/cpfl_flow_parser.c     | 1839 +++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow_parser.h     |  267 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.c        |  263 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
>  drivers/net/cpfl/cpfl_representor.c     |   29 +
>  drivers/net/cpfl/cpfl_rules.c           |  127 ++
>  drivers/net/cpfl/cpfl_rules.h           |  306 ++++
>  drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
>  drivers/net/cpfl/meson.build            |   12 +
>  19 files changed, 6452 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/cpfl/cpfl_actions.h  create mode 100644
> drivers/net/cpfl/cpfl_controlq.c  create mode 100644
> drivers/net/cpfl/cpfl_controlq.h  create mode 100644
> drivers/net/cpfl/cpfl_flow.c  create mode 100644 drivers/net/cpfl/cpfl_flow.h
> create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h  create mode 100644
> drivers/net/cpfl/cpfl_rules.c  create mode 100644 drivers/net/cpfl/cpfl_rules.h
> 
> --
> 2.34.1

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel after refining some commit logs.

Thanks
Qi


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [PATCH v9 0/9] add rte flow support for cpfl
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (10 preceding siblings ...)
  2023-09-28 12:45               ` [PATCH v9 0/9] add rte flow support for cpfl Zhang, Qi Z
@ 2023-09-28 16:04               ` Stephen Hemminger
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
  2023-10-15 11:21               ` [PATCH v9 " Thomas Monjalon
  13 siblings, 0 replies; 128+ messages in thread
From: Stephen Hemminger @ 2023-09-28 16:04 UTC (permalink / raw)
  To: yuying.zhang; +Cc: dev, qi.z.zhang, jingjing.wu, beilei.xing

On Thu, 28 Sep 2023 08:44:49 +0000
yuying.zhang@intel.com wrote:

> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> This patchset add rte flow support for cpfl driver.
> It depends on the following patch set:
> http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/
> 
> Wenjing Qiao (2):
>   net/cpfl: add json parser for rte flow pattern rules
>   net/cpfl: build action mapping rules from JSON
> 
> Yuying Zhang (7):
>   net/cpfl: set up rte flow skeleton
>   net/cpfl: set up control path
>   net/cpfl: add FXP low level implementation
>   net/cpfl: add fxp rule module
>   net/cpfl: add fxp flow engine
>   net/cpfl: add flow support for representor
>   net/cpfl: add support of to represented port action

Are there new test cases needed for this, or would it get covered
by the regular rte_flow tests?
We really need more of a test suite for rte_flow, but it is difficult because
there is so much variation between NICs.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 0/9] add rte flow support for cpfl
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (11 preceding siblings ...)
  2023-09-28 16:04               ` Stephen Hemminger
@ 2023-10-09  4:00               ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
                                   ` (9 more replies)
  2023-10-15 11:21               ` [PATCH v9 " Thomas Monjalon
  13 siblings, 10 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

This patchset adds rte flow support for the cpfl driver.
It depends on the following patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/

Wenjing Qiao (2):
  net/cpfl: parse flow offloading hint from JSON
  net/cpfl: build action mapping rules from JSON

Yuying Zhang (7):
  net/cpfl: set up flow offloading skeleton
  net/cpfl: set up control path
  net/cpfl: add FXP low level implementation
  net/cpfl: implement FXP rule creation and destroying
  net/cpfl: adapt FXP to flow engine
  net/cpfl: support flow ops on representor
  net/cpfl: support represented port action
---
v10:
* fix ci build issue

v9:
* refine rx queue message process function

v8:
* fix compile issues
* refine document and separate patch with different features

v7:
* refine commit log
* fix compile issues

v6:
* use existed jansson instead of json-c library
* refine "add FXP low level implementation"

V5:
* Add input validation for some functions

 doc/guides/nics/cpfl.rst                |   52 +
 doc/guides/rel_notes/release_23_11.rst  |    1 +
 drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
 drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
 drivers/net/cpfl/cpfl_controlq.h        |   75 +
 drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
 drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
 drivers/net/cpfl/cpfl_flow.c            |  339 +++++
 drivers/net/cpfl/cpfl_flow.h            |   85 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c |  666 ++++++++
 drivers/net/cpfl/cpfl_flow_parser.c     | 1835 +++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h     |  268 ++++
 drivers/net/cpfl/cpfl_fxp_rule.c        |  263 ++++
 drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
 drivers/net/cpfl/cpfl_representor.c     |   29 +
 drivers/net/cpfl/cpfl_rules.c           |  127 ++
 drivers/net/cpfl/cpfl_rules.h           |  306 ++++
 drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
 drivers/net/cpfl/meson.build            |   12 +
 19 files changed, 6448 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
                                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Add devargs "flow_parser" to specify the path of a JSON
configure file. The cpfl PMD use the JSON configuration file
to translate rte_flow tokens into low level hardware
representation.

Example:
    -a ca:00.0,flow_parser="refpkg.json"

The jansson library is used to parse the JSON syntax.

In this patch, the parser only builds rules that map from
a set of rte_flow items to hardware representations. The rules
that map from rte_flow actions will be enabled in a separate
patch to avoid an overly large patch.

Note, the JSON configuration file is provided by the hardware vendor
and is intended to work exclusively with a specific P4 pipeline
configuration, which must be compiled and programmed into the hardware.

The format of the JSON file strictly follows the internal specifications
of the hardware vendor and is not meant to be modified directly by
users.
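
For illustration only, the fragment below is shaped after the keys this
parser reads ("patterns", "key" with "protocols"/"attributes", and
"actions" of type "sem"); every value in it is hypothetical and a real
file has to come from the vendor's P4 package:

    {
        "patterns": [
            {
                "key": {
                    "protocols": [
                        { "type": "eth", "fields": [] },
                        { "type": "ipv4",
                          "fields": [ { "name": "src_addr", "mask": "255.255.255.255" } ] }
                    ],
                    "attributes": [ { "Name": "ingress", "Value": 1 } ]
                },
                "actions": [
                    { "type": "sem",
                      "data": { "profile": 1, "subprofile": 0, "keysize": 16,
                                "fieldvectors": [ { "offset": 0, "type": "immediate", "value": 1 } ] } }
                ]
            }
        ]
    }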

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst            |   38 +
 drivers/net/cpfl/cpfl_ethdev.c      |   38 +-
 drivers/net/cpfl/cpfl_ethdev.h      |   76 ++
 drivers/net/cpfl/cpfl_flow_parser.c | 1299 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  168 ++++
 drivers/net/cpfl/meson.build        |    7 +
 6 files changed, 1625 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 83a18c3f2e..e17347d15c 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -128,12 +128,32 @@ Runtime Configuration
 
     -a BDF,representor=vf[0-3],representor=c1pf1
 
+- ``flow_parser`` (default ``not enabled``)
+
+  The cpfl PMD supports utilizing a JSON config file to translate rte_flow tokens into
+  low-level hardware resources.
+
+  The JSON configuration file is provided by the hardware vendor and is intended to work
+  exclusively with a specific P4 pipeline configuration, which must be compiled and programmed
+  into the hardware.
+
+  The format of the JSON file strictly follows the internal specifications of the hardware
+  vendor and is not meant to be modified directly by users.
+
+  Using the ``devargs`` option ``flow_parser``, the user can specify the path
+  of a JSON file, for example::
+
+    -a ca:00.0,flow_parser="refpkg.json"
+
+  Then the PMD will load the JSON file for device ``ca:00.0``.
+  The parameter is optional.
 
 Driver compilation and testing
 ------------------------------
 
 Refer to the document :doc:`build_and_test` for details.
 
+The jansson library must be installed to use rte_flow.
 
 Features
 --------
@@ -164,3 +184,21 @@ Hairpin queue
 E2100 Series can loopback packets from RX port to TX port.
 This feature is called port-to-port or hairpin.
 Currently, the PMD only supports single port hairpin.
+
+Rte_flow
+~~~~~~~~~~~~~
+
+The PMD uses a JSON file to translate rte_flow tokens into
+low-level hardware resources.
+
+- Required Libraries
+
+  * jansson
+
+    * For Ubuntu, it can be installed using ``apt install libjansson-dev``
+
+- Run testpmd with the JSON file
+
+   .. code-block:: console
+
+      dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072ab33..1745f703c8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,10 @@
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
 
+#ifdef RTE_HAS_JANSSON
+#define CPFL_FLOW_PARSER	"flow_parser"
+#endif
+
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
 struct cpfl_adapter_list cpfl_adapter_list;
@@ -31,6 +35,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef RTE_HAS_JANSSON
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1537,6 +1544,24 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef RTE_HAS_JANSSON
+static int
+parse_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	if (strlen(value) > CPFL_FLOW_FILE_LEN - 1) {
+		PMD_DRV_LOG(ERR, "file path(%s) is too long.", value);
+		return -1;
+	}
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1585,7 +1610,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef RTE_HAS_JANSSON
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index d0dcc0cc05..383dbd14c6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -77,6 +77,11 @@
 #define CPFL_VPORT_LAN_PF	0
 #define CPFL_VPORT_LAN_VF	1
 
+#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_INVALID_HW_ID	UINT16_MAX
+#define CPFL_META_CHUNK_LENGTH	1024
+#define CPFL_META_LENGTH	32
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -99,6 +104,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
@@ -165,6 +171,20 @@ struct cpfl_repr {
 	bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_metadata_chunk {
+	int type;
+	uint8_t data[CPFL_META_CHUNK_LENGTH];
+};
+
+/**
+ * It is the driver's responsibility to simulate a metadata buffer which
+ * can be used as a data source to fill the key of a flow rule.
+ */
+struct cpfl_metadata {
+	int length;
+	struct cpfl_metadata_chunk chunks[CPFL_META_LENGTH];
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
@@ -185,6 +205,8 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
+
+	struct cpfl_metadata meta;
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -211,4 +233,58 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
 
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport.info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: CPFL_HOST0_CPF_ID, acc: CPFL_ACC_CPF_ID */
+		vport_identity.pf_id = CPFL_ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+		ret = rte_hash_lookup_data(adapter->vport_map_hash,
+					   &vport_identity,
+					   (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		return info->vport.info.vsi_id;
+	}
+
+err:
+	return CPFL_INVALID_HW_ID;
+}
+
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..a5fff5a857
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+
+#include "cpfl_flow_parser.h"
+
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+static const char *
+cpfl_json_t_to_string(json_t *object, const char *name)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;
+	}
+
+	return json_string_value(subobject);
+}
+
+static int
+cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint16_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_json_t_to_uint32(json_t *object, const char *name, uint32_t *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (uint32_t)json_integer_value(subobject);
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_key_attr(json_t *ob_pr_key_attrs, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_array_size(ob_pr_key_attrs);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_array_get(ob_pr_key_attrs, i);
+		name = cpfl_json_t_to_string(object, "Name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			goto err;
+		}
+		ret = cpfl_json_t_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			goto err;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	rte_free(js_pr->key.attributes);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	int len, i;
+
+	if (!ob_fields)
+		return 0;
+	len = json_array_size(ob_fields);
+	if (len == 0)
+		return 0;
+	js_field->fields_size = len;
+	js_field->fields =
+	    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+	if (!js_field->fields) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		const char *name, *mask;
+
+		object = json_array_get(ob_fields, i);
+		name = cpfl_json_t_to_string(object, "name");
+		if (!name) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+			goto err;
+		}
+		if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "The 'name' is too long.");
+			goto err;
+		}
+		memcpy(js_field->fields[i].name, name, strlen(name));
+
+		if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+		    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+			mask = cpfl_json_t_to_string(object, "mask");
+			if (!mask) {
+				PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+				goto err;
+			}
+			if (strlen(mask) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				PMD_DRV_LOG(ERR, "The 'mask' is too long.");
+				goto err;
+			}
+			memcpy(js_field->fields[i].mask, mask, strlen(mask));
+		} else {
+			uint32_t mask_32b;
+			int ret;
+
+			ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b);
+			if (ret < 0) {
+				PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+				goto err;
+			}
+			js_field->fields[i].mask_32b = mask_32b;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_field->fields);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_key_proto(json_t *ob_pr_key_protos, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_array_size(ob_pr_key_protos);
+	if (len == 0)
+		return 0;
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_array_get(ob_pr_key_protos, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID)
+			goto err;
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		ob_fields = json_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(ob_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	rte_free(js_pr->key.protocols);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "layer", &layer);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_t_to_string(ob_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv_metadata(json_t *ob_value, struct cpfl_flow_js_fv *js_fv)
+{
+	int ret;
+
+	ret = cpfl_json_t_to_uint16(ob_value, "type", &js_fv->meta.type);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "offset", &js_fv->meta.offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+	ret = cpfl_json_t_to_uint16(ob_value, "mask", &js_fv->meta.mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_array_size(ob_fvs);
+	if (len == 0)
+		return 0;
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_t *object, *ob_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		js_fv = &js_act->sem.fv[i];
+		object = json_array_get(ob_fvs, i);
+		ret = cpfl_json_t_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_fv->offset = offset;
+
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		ob_value = json_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_integer_value(ob_value);
+		} else if (strcmp(type, "metadata") == 0) {
+			js_fv->type = CPFL_FV_TYPE_METADATA;
+			cpfl_flow_js_pattern_act_fv_metadata(ob_value, js_fv);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			cpfl_flow_js_pattern_act_fv_proto(ob_value, js_fv);
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_act->sem.fv);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_t_to_string(ob_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_t *ob_fvs, *ob_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		ob_sem = json_object_get(ob_per_act, "data");
+		ret = cpfl_json_t_to_uint16(ob_sem, "profile", &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "subprofile", &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_t_to_uint16(ob_sem, "keysize", &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		ob_fvs = json_object_get(ob_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_js_pattern_act(json_t *ob_pr_acts, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_array_size(ob_pr_acts);
+	if (len == 0)
+		return 0;
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_t *object;
+
+		object = json_array_get(ob_pr_acts, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * The patterns object array defines a set of rules directing the PMD to match sequences of
+ * rte_flow protocol headers and translate them into profile/field vectors for each pipeline
+ * stage. This object is mandatory.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_prs;
+	int i, len;
+
+	/* Pattern Rules */
+	ob_prs = json_object_get(ob_root, "patterns");
+	if (!ob_prs) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_array_size(ob_prs);
+	if (len == 0)
+		return 0;
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		json_t *ob_pr_actions;
+		json_t *ob_pr_key;
+		json_t *ob_pr_key_protos;
+		json_t *ob_pr_key_attrs;
+		int ret;
+
+		object = json_array_get(ob_prs, i);
+		/* pr->key */
+		ob_pr_key = json_object_get(object, "key");
+		/* pr->key->protocols */
+		ob_pr_key_protos = json_object_get(ob_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(ob_pr_key_protos, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			goto err;
+		}
+		/* pr->key->attributes */
+		ob_pr_key_attrs = json_object_get(ob_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(ob_pr_key_attrs, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			goto err;
+		}
+		/* pr->actions */
+		ob_pr_actions = json_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(ob_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->patterns);
+	return -EINVAL;
+}
+
+static int
+cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_error_t json_error;
+	json_t *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_load_file(filename, 0, &json_error);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Bad JSON file \"%s\": %s", filename, json_error.text);
+		goto free_parser;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		goto free_parser;
+	}
+	*flow_parser = parser;
+	json_decref(root);
+
+	return 0;
+free_parser:
+	rte_free(parser);
+	return -EINVAL;
+}
+
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	if (!parser)
+		return 0;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		if (!pattern)
+			continue;
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+	rte_free(parser);
+
+	return 0;
+}
+
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_fieldvectors(struct cpfl_itf *itf, struct cpfl_flow_js_fv *js_fvs, int size,
+			uint8_t *fv, const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)(temp_fv >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_METADATA) {
+			uint16_t type, v_offset, mask;
+
+			type = js_fv->meta.type;
+			v_offset = js_fv->meta.offset;
+			mask = js_fv->meta.mask;
+			temp_fv = cpfl_metadata_read16(&itf->adapter->meta, type, v_offset) & mask;
+			fv[2 * offset] = (uint8_t)(temp_fv & 0x00ff);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv >> 8);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_parse_pr_actions(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		if (attr->group == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_eth_mask(const char *mask, struct rte_ether_addr addr)
+{
+	int i, ret;
+	struct rte_ether_addr mask_bytes;
+
+	ret = rte_ether_unformat_addr(mask, &mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "translate mac address from string to rte_ether_addr failed.");
+		return -EINVAL;
+	}
+	/* validate eth mask addr if match */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (mask_bytes.addr_bytes[i] != addr.addr_bytes[i])
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* 0: success; -EINVAL: invalid; -ENOTSUP: fail */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret < 0)
+		return -EINVAL;
+	/* validate ipv4 mask addr if match */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst; see Field Mapping for more
+		 */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (strcmp((const char *)eth_mask->src.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (strcmp((const char *)eth_mask->dst.addr_bytes, "\x00\x00\x00\x00\x00\x00") != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int field_size, j;
+	int flag_next_proto_id, flag_src_addr, flag_dst_addr;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && ipv4_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !ipv4_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_next_proto_id = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+
+		field = &proto->fields[j];
+		name = field->name;
+		if (strcmp(name, "src_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			const char *mask;
+
+			mask = field->mask;
+			if (cpfl_check_ipv4_mask(mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "next_proto_id") == 0) {
+			uint8_t mask;
+
+			mask = (uint8_t)field->mask_32b;
+			if (mask != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			flag_next_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_addr) {
+		if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+			return -EINVAL;
+	}
+	if (!flag_next_proto_id) {
+		if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int field_size, j;
+	int flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	field_size = proto->fields_size;
+	if (field_size != 0 && !tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && tcp_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !tcp_mask)
+		return 0;
+
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (tcp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (tcp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int field_size, j;
+	bool flag_src_port, flag_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && udp_mask)
+		return -EINVAL;
+	if (field_size == 0 && !udp_mask)
+		return 0;
+	flag_src_port = false;
+	flag_dst_port = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		uint16_t mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_udp fields against the JSON mask */
+		name = field->name;
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			flag_src_port = true;
+		} else if (strcmp(name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			flag_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	if (!flag_src_port) {
+		if (udp_mask->hdr.src_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+	if (!flag_dst_port) {
+		if (udp_mask->hdr.dst_port != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int field_size, j;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if (field_size != 0 && !vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && vxlan_mask)
+		return -EINVAL;
+	if (field_size == 0 && !vxlan_mask)
+		return 0;
+	for (j = 0; j < field_size; j++) {
+		const char *name;
+		int64_t mask;
+
+		field = &proto->fields[j];
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		mask = (int64_t)field->mask_32b;
+		if (strcmp(name, "vx_vni") == 0) {
+			if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+				return -EINVAL;
+		} else {
+			PMD_DRV_LOG(ERR, "unsupported field name.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int field_size;
+
+	if (!proto)
+		return 0;
+	field_size = proto->fields_size;
+	if ((field_size != 0 && !icmp_mask) || (field_size == 0 && icmp_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+	int j = 0;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Unsupported item type: %d.", type);
+			return -EPERM;
+		}
+	}
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	int ret;
+
+	/* pr->key */
+	/* pr->key->protocols */
+	ret = cpfl_check_pattern_key_proto(pattern->key.protocols,
+					   pattern->key.proto_size, items);
+	if (ret < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	ret = cpfl_check_pattern_key_attr(pattern->key.attributes, attr);
+	if (ret < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+int
+cpfl_flow_parse_items(struct cpfl_itf *itf,
+		      struct cpfl_flow_js_parser *parser,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(itf, pattern->actions, pattern->actions_size,
+					    items, attr, pr_action);
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+bool
+cpfl_metadata_write_port_id(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 5;
+
+	dev_id = cpfl_get_port_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_targetvsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 2;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 1);
+
+	return true;
+}
+
+bool
+cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 6;
+	const int offset = 0;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
+
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf)
+{
+	uint16_t dev_id;
+	const int type = 0;
+	const int offset = 24;
+
+	dev_id = cpfl_get_vsi_id(itf);
+	if (dev_id == CPFL_INVALID_HW_ID) {
+		PMD_DRV_LOG(ERR, "fail to get hw ID");
+		return false;
+	}
+	cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id);
+
+	return true;
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..268e1bc89f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <jansson.h>
+#include <rte_flow.h>
+
+#include "cpfl_ethdev.h"
+
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+#define CPFL_MOD_KEY_NUM_MAX 8
+
+/* Pattern Rules Storage */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* This structure matches a sequence of fields in struct rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* This structure matches a sequence of "struct rte_flow_item" */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_METADATA,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+};
+
+struct cpfl_flow_js_fv {
+	uint16_t offset;
+	enum cpfl_flow_js_fv_type type;
+	union {
+		/* a 16-bit value */
+		uint16_t immediate;
+		/* a reference to a protocol header with a <header, layer, offset, mask> tuple */
+		struct {
+			enum rte_flow_item_type header;
+			uint16_t layer;
+			uint16_t offset;
+			uint16_t mask;
+		} proto;
+		/* a reference to a metadata */
+		struct {
+			uint16_t type;
+			uint16_t offset;
+			uint16_t mask;
+		} meta;
+	};
+};
+
+/**
+ * This structure defines the message used to compose the
+ * profile/key of a SEM control packet.
+ */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;		    /* SEM profile ID */
+	uint16_t subprof;	    /* SEM subprofile ID */
+	uint16_t keysize;	    /*  extract key size in bytes */
+	struct cpfl_flow_js_fv *fv; /* A SEM field vector array */
+	int fv_size;
+};
+
+/* define how to map current key to low level pipeline configuration */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD how to parse rte_flow
+ * protocol headers. Each rule is described by a key object and an action array.
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	/* An array to define how to map current key to low level pipeline configuration. */
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+};
+
+/* Pattern Rules */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+int cpfl_flow_parse_items(struct cpfl_itf *itf,
+			  struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
+bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
+bool cpfl_metadata_write_sourcevsi(struct cpfl_itf *itf);
+
+static inline void
+cpfl_metadata_init(struct cpfl_metadata *meta)
+{
+	int i;
+
+	for (i = 0; i < CPFL_META_LENGTH; i++)
+		meta->chunks[i].type = i;
+}
+
+static inline void
+cpfl_metadata_write16(struct cpfl_metadata *meta, int type, int offset, uint16_t data)
+{
+	memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint16_t));
+}
+
+static inline void
+cpfl_metadata_write32(struct cpfl_metadata *meta, int type, int offset, uint32_t data)
+{
+	memcpy(&meta->chunks[type].data[offset], &data, sizeof(uint32_t));
+}
+
+static inline uint16_t
+cpfl_metadata_read16(struct cpfl_metadata *meta, int type, int offset)
+{
+	return *((uint16_t *)(&meta->chunks[type].data[offset]));
+}
+
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d8b92ae16a..d767818eb7 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,10 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+if dpdk_conf.has('RTE_HAS_JANSSON')
+    sources += files(
+            'cpfl_flow_parser.c',
+    )
+    ext_deps += jansson_dep
+endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 2/9] net/cpfl: build action mapping rules from JSON
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
                                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: Wenjing Qiao

From: Wenjing Qiao <wenjing.qiao@intel.com>

Build rules that map an rte_flow action vxlan_encap or
vxlan_decap to hardware representations.
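
For reference, a minimal "modifications" entry that this parser is
meant to consume could look like the sketch below. The profile ID and
the layout index/offset/size values here are illustrative placeholders
rather than values taken from a real device configuration:

    "modifications": [
        {
            "key": {
                "actions": [
                    {
                        "type": "vxlan_encap",
                        "data": {
                            "protocols": ["eth", "ipv4", "udp", "vxlan"]
                        }
                    }
                ]
            },
            "action": {
                "type": "mod",
                "data": {
                    "profile": 8,
                    "layout": [
                        { "index": 0, "hint": "eth", "offset": 0, "size": 14 },
                        { "index": 0, "hint": "ipv4", "offset": 0, "size": 20 }
                    ]
                }
            }
        }
    ]

Each "key" lists the rte_flow actions to match, while "action" names
the modification profile and describes how to compose the MOD memory
region from the matched action's header definitions.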

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 538 +++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_flow_parser.h | 100 ++++++
 2 files changed, 637 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
index a5fff5a857..0e623494a2 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.c
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -28,6 +28,18 @@ cpfl_get_item_type_by_str(const char *type)
 	return RTE_FLOW_ITEM_TYPE_VOID;
 }
 
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	else if (strcmp(type, "vxlan_decap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+
+	PMD_DRV_LOG(ERR, "Unsupported action type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
 static const char *
 cpfl_json_t_to_string(json_t *object, const char *name)
 {
@@ -46,6 +58,29 @@ cpfl_json_t_to_string(json_t *object, const char *name)
 	return json_string_value(subobject);
 }
 
+static int
+cpfl_json_t_to_int(json_t *object, const char *name, int *value)
+{
+	json_t *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	if (!json_is_integer(subobject)) {
+		PMD_DRV_LOG(ERR, "%s is not an integer.", name);
+		return -EINVAL;
+	}
+	*value = (int)json_integer_value(subobject);
+
+	return 0;
+}
+
 static int
 cpfl_json_t_to_uint16(json_t *object, const char *name, uint16_t *value)
 {
@@ -518,6 +553,228 @@ cpfl_flow_js_pattern_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 	return -EINVAL;
 }
 
+static int
+cpfl_flow_js_mr_key(json_t *ob_mr_keys, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_array_size(ob_mr_keys);
+	if (len == 0)
+		return 0;
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_t *object, *ob_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_array_get(ob_mr_keys, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_t_to_string(object, "type");
+		if (!type) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			goto err;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID)
+			goto err;
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		ob_data = json_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_t *ob_protos;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			ob_protos = json_object_get(ob_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!ob_protos) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_array_size(ob_protos);
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_t *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_array_get(ob_protos, j);
+				s = json_string_value(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					goto err;
+				}
+				encap->protocols[j] = proto_type;
+			}
+		} else if (js_mr_key->actions[i].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			PMD_DRV_LOG(ERR, "unsupported action type: %d.", js_mr_key->actions[i].type);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mr_key->actions);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_array_size(ob_layouts);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_t *object;
+		int index = 0, size = 0, offset = 0;
+		int ret;
+		const char *hint;
+
+		object = json_array_get(ob_layouts, i);
+		ret = cpfl_json_t_to_int(object, "index", &index);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			goto err;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_t_to_int(object, "size", &size);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			goto err;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_t_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			goto err;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_t_to_string(object, "hint");
+		if (!hint) {
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			goto err;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+
+err:
+	rte_free(js_mod->layout);
+	return -EINVAL;
+}
+
+static int
+cpfl_flow_js_mr_action(json_t *ob_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_t *ob_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_t_to_string(ob_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* mr->action->data */
+	ob_data = json_object_get(ob_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_t *ob_layouts;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_t_to_uint16(ob_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		ob_layouts = json_object_get(ob_data, "layout");
+		ret = cpfl_flow_js_mr_layout(ob_layouts, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "unsupported action type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * The modifications object array defines a set of rules for the PMD to match rte_flow
+ * modification actions and translate them into the Modification profile. This object
+ * is optional.
+ */
+static int
+cpfl_flow_js_mod_rule(json_t *ob_root, struct cpfl_flow_js_parser *parser)
+{
+	json_t *ob_mrs;
+	int i, len;
+
+	ob_mrs = json_object_get(ob_root, "modifications");
+	if (!ob_mrs) {
+		PMD_DRV_LOG(INFO, "The modifications section is optional.");
+		return 0;
+	}
+	len = json_array_size(ob_mrs);
+	if (len == 0)
+		return 0;
+	parser->mr_size = len;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_t *object, *ob_mr_key, *ob_mr_action, *ob_mr_key_action;
+
+		object = json_array_get(ob_mrs, i);
+		/* mr->key */
+		ob_mr_key = json_object_get(object, "key");
+		/* mr->key->actions */
+		ob_mr_key_action = json_object_get(ob_mr_key, "actions");
+		ret = cpfl_flow_js_mr_key(ob_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		ob_mr_action = json_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(ob_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	rte_free(parser->modifications);
+	return -EINVAL;
+}
+
 static int
 cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 {
@@ -528,6 +785,11 @@ cpfl_parser_init(json_t *ob_root, struct cpfl_flow_js_parser *parser)
 		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
 		return ret;
 	}
+	ret = cpfl_flow_js_mod_rule(ob_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
 
 	return 0;
 }
@@ -598,6 +860,15 @@ cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
 		rte_free(pattern->actions);
 	}
 	rte_free(parser->patterns);
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (!mr)
+			continue;
+		rte_free(mr->key.actions);
+		rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
 	rte_free(parser);
 
 	return 0;
@@ -614,6 +885,17 @@ cpfl_get_items_length(const struct rte_flow_item *items)
 	return length;
 }
 
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
 static int
 cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
 		       uint16_t offset, uint8_t *fv)
@@ -642,7 +924,7 @@ cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item
 				break;
 			}
 			layer++;
-		} /* TODO: more type... */
+		}
 	}
 
 	return 0;
@@ -1231,6 +1513,260 @@ cpfl_flow_parse_items(struct cpfl_itf *itf,
 	return -EINVAL;
 }
 
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols do not match.");
+		return -EINVAL;
+	}
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols do not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols do not match.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* check and parse */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+			j++;
+		} else if (type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i;
+	int start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr = NULL;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			hint = "dummy";
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		temp = mr_key_action + index;
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					if (start > 255) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Unsupported action type: %d.", temp->type);
+			return -EINVAL;
+		} /* else TODO: more type... */
+		start += size;
+	}
+	*byte_len = start;
+
+	return 0;
+}
+
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	enum cpfl_flow_mr_action_type type;
+
+	/* mr->action->type */
+	type = action->type;
+	/* mr->action->data */
+	if (type == CPFL_JS_MR_ACTION_TYPE_MOD) {
+		struct cpfl_flow_js_mr_layout *layout;
+
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		layout = action->mod.layout;
+		if (!layout)
+			return 0;
+		memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+
+		return cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+					 mr_action->mod.data, &mr_action->mod.byte_len);
+	}
+	PMD_DRV_LOG(ERR, "Unsupported action type: %d.", type);
+
+	return -EINVAL;
+}
+
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int key_action_size;
+
+	/* mr->key->actions */
+	key_action_size = mr->key.actions_size;
+	return cpfl_parse_mr_key_action(mr->key.actions, key_action_size, actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	int i;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	for (i = 0; i < parser->mr_size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		if (!mr)
+			return -EINVAL;
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		return cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+	}
+
+	return -EINVAL;
+}
+
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications section is optional.");
+		return 0;
+	}
+
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
+
 bool
 cpfl_metadata_write_port_id(struct cpfl_itf *itf)
 {
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
index 268e1bc89f..962667adc2 100644
--- a/drivers/net/cpfl/cpfl_flow_parser.h
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -106,9 +106,79 @@ struct cpfl_flow_js_pr {
 	uint16_t actions_size;
 };
 
+/* Modification Rules Storage */
+/**
+ * The vxlan_encap action matches RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ * and includes a sequence of protocol headers defined in the
+ * 'protocols' field of its data.
+ */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* A set of modification rte_flow_action_xxx objects can be defined as a type / data pair. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+struct cpfl_flow_js_mr_layout {
+	int index;				/* links to the element of the actions array */
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX]; /* where the data to copy from */
+	uint16_t offset;			/* the start byte of the data to copy from */
+	uint16_t size; /*  bytes of the data to be copied to the memory region */
+};
+
+/** For mod data, besides the profile ID, a layout array defines a set of hints
+ * that help the driver compose the MOD memory region when the action needs to
+ * insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/** Currently, the type can only be mod.
+ *
+ * For mod data, besides the profile ID, a layout array defines a set
+ * of hints that help the driver compose the MOD memory region when the
+ * action needs to insert/update some packet data from user input.
+ */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/**
+ * This structure defines a set of rules that direct the PMD to parse rte_flow
+ * modification actions. Each rule is described by a key/action pair.
+ */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
 struct cpfl_flow_js_parser {
 	struct cpfl_flow_js_pr *patterns;
 	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
 };
 
 /* Pattern Rules */
@@ -126,6 +196,33 @@ struct cpfl_flow_pr_action {
 	};
 };
 
+/* Modification Rules */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
 int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
 int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
 int cpfl_flow_parse_items(struct cpfl_itf *itf,
@@ -133,6 +230,9 @@ int cpfl_flow_parse_items(struct cpfl_itf *itf,
 			  const struct rte_flow_item *items,
 			  const struct rte_flow_attr *attr,
 			  struct cpfl_flow_pr_action *pr_action);
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
 bool cpfl_metadata_write_port_id(struct cpfl_itf *itf);
 bool cpfl_metadata_write_vsi(struct cpfl_itf *itf);
 bool cpfl_metadata_write_targetvsi(struct cpfl_itf *itf);
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
                                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up the rte_flow backend skeleton. Introduce the framework
to support different engines as rte_flow backend. Bridge rte_flow
driver API to flow engines.
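
As a rough sketch of how an engine plugs into this skeleton (callback
names below are illustrative; the FXP engine added later in this
series provides the real implementations):

	static struct cpfl_flow_engine example_fxp_engine = {
		.type = CPFL_FLOW_ENGINE_FXP,
		.init = example_fxp_init,
		.uninit = example_fxp_uninit,
		.create = example_fxp_create,
		.destroy = example_fxp_destroy,
		.parse_pattern_action = example_fxp_parse_pattern_action,
	};

	RTE_INIT(example_fxp_engine_register)
	{
		cpfl_flow_engine_register(&example_fxp_engine);
	}

cpfl_flow_engine_init() then invokes each registered engine's init()
during adapter setup, and cpfl_flow_engine_match() walks the same list
at validate/create time until one engine's parse_pattern_action()
accepts the given pattern and actions.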

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c |  53 ++++++
 drivers/net/cpfl/cpfl_ethdev.h |   5 +
 drivers/net/cpfl/cpfl_flow.c   | 339 +++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow.h   |  85 +++++++++
 drivers/net/cpfl/meson.build   |   1 +
 5 files changed, 483 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow.c
 create mode 100644 drivers/net/cpfl/cpfl_flow.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1745f703c8..c350728861 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -15,6 +15,7 @@
 #include "cpfl_ethdev.h"
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+cpfl_flow_free(struct cpfl_vport *vport)
+{
+	struct rte_flow *p_flow;
+
+	while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) {
+		TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next);
+		if (p_flow->engine->free)
+			p_flow->engine->free(p_flow);
+		rte_free(p_flow);
+	}
+}
+
 static int
 cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
 {
@@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
 		cpfl_p2p_queue_grps_del(vport);
 
+	cpfl_flow_free(cpfl_vport);
 	idpf_vport_deinit(vport);
 	rte_free(cpfl_vport->p2p_q_chunks_info);
 
@@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static int
+cpfl_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
+{
+	struct cpfl_itf *itf;
+
+	if (!dev)
+		return -EINVAL;
+
+	itf = CPFL_DEV_TO_ITF(dev);
+
+	/* only vport support rte_flow */
+	if (itf->type != CPFL_ITF_TYPE_VPORT)
+		return -ENOTSUP;
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported, please install the jansson library.");
+#endif
+	return 0;
+}
+
 static int
 cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 			    size_t len, uint32_t tx)
@@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
 	.xstats_get			= cpfl_dev_xstats_get,
 	.xstats_get_names		= cpfl_dev_xstats_get_names,
 	.xstats_reset			= cpfl_dev_xstats_reset,
+	.flow_ops_get			= cpfl_dev_flow_ops_get,
 	.hairpin_cap_get		= cpfl_hairpin_cap_get,
 	.rx_hairpin_queue_setup		= cpfl_rx_hairpin_queue_setup,
 	.tx_hairpin_queue_setup		= cpfl_tx_hairpin_queue_setup,
@@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+#ifdef RTE_HAS_JANSSON
+	ret = cpfl_flow_init(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init flow module");
+		goto err_flow_init;
+	}
+#endif
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+#ifdef RTE_HAS_JANSSON
+err_flow_init:
+#endif
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2182,6 +2231,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
 	cpfl_vport->itf.adapter = adapter;
 	cpfl_vport->itf.data = dev->data;
+	TAILQ_INIT(&cpfl_vport->itf.flow_list);
 	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+#ifdef RTE_HAS_JANSSON
+	cpfl_flow_uninit(adapter);
+#endif
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 383dbd14c6..69bf32cfbd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -140,9 +140,12 @@ enum cpfl_itf_type {
 	CPFL_ITF_TYPE_REPRESENTOR,
 };
 
+TAILQ_HEAD(cpfl_flow_list, rte_flow);
+
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
+	struct cpfl_flow_list flow_list;
 	void *data;
 };
 
@@ -206,6 +209,8 @@ struct cpfl_adapter_ext {
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_allowlist_hash;
 
+	struct cpfl_flow_js_parser *flow_parser;
+
 	struct cpfl_metadata meta;
 };
 
diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c
new file mode 100644
index 0000000000..03dd1ffa44
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "cpfl_flow.h"
+#include "cpfl_flow_parser.h"
+
+TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine);
+
+static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list);
+
+void
+cpfl_flow_engine_register(struct cpfl_flow_engine *engine)
+{
+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
+}
+
+struct cpfl_flow_engine *
+cpfl_flow_engine_match(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->parse_pattern_action)
+			continue;
+
+		if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0)
+			continue;
+		return engine;
+	}
+
+	return NULL;
+}
+
+int
+cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+	int ret;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (!engine->init) {
+			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
+				     engine->type);
+			return -ENOTSUP;
+		}
+
+		ret = engine->init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
+				     engine->type);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	void *temp;
+
+	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
+		if (engine->uninit)
+			engine->uninit(adapter);
+	}
+}
+
+static int
+cpfl_flow_attr_valid(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	if (attr->priority > CPFL_PREC_MAX) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Only support priority 0-7.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+cpfl_flow_param_valid(const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = cpfl_flow_attr_valid(attr, error);
+	if (ret)
+		return ret;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+__cpfl_flow_validate(struct rte_eth_dev *dev,
+		     const struct rte_flow_attr *attr,
+		     const struct rte_flow_item pattern[],
+		     const struct rte_flow_action actions[],
+		     void **meta,
+		     struct cpfl_flow_engine **engine,
+		     struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cpfl_flow_param_valid(attr, pattern, actions, error);
+	if (ret)
+		return ret;
+
+	*engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta);
+	if (!*engine) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched engine.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+int
+cpfl_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct cpfl_flow_engine *engine = NULL;
+	int ret;
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error);
+
+	return ret;
+}
+
+struct rte_flow *
+cpfl_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_engine *engine = NULL;
+	struct rte_flow *flow;
+	void *meta;
+	int ret;
+
+	flow = rte_malloc(NULL, sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	if (!engine->create) {
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "No matched flow creation function");
+		rte_free(flow);
+		return NULL;
+	}
+
+	ret = engine->create(dev, flow, meta, error);
+	if (ret) {
+		rte_free(flow);
+		return NULL;
+	}
+
+	flow->engine = engine;
+	TAILQ_INSERT_TAIL(&itf->flow_list, flow, next);
+
+	return flow;
+}
+
+int
+cpfl_flow_destroy(struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	int ret = 0;
+
+	if (!flow || !flow->engine || !flow->engine->destroy) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	ret = flow->engine->destroy(dev, flow, error);
+	if (!ret)
+		TAILQ_REMOVE(&itf->flow_list, flow, next);
+	else
+		PMD_DRV_LOG(ERR, "Failed to destroy flow");
+
+	return ret;
+}
+
+int
+cpfl_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret = 0;
+
+	RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) {
+		ret = cpfl_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to flush flows");
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int
+cpfl_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_query_count *count = data;
+	int ret = -EINVAL;
+
+	if (!flow || !flow->engine || !flow->engine->query_count) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Invalid flow");
+		return -rte_errno;
+	}
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = flow->engine->query_count(dev, flow, count, error);
+			break;
+		default:
+			ret = rte_flow_error_set(error, ENOTSUP,
+						 RTE_FLOW_ERROR_TYPE_ACTION,
+						 actions,
+						 "action not supported");
+			break;
+		}
+	}
+
+	return ret;
+}
+
+const struct rte_flow_ops cpfl_flow_ops = {
+	.validate = cpfl_flow_validate,
+	.create = cpfl_flow_create,
+	.destroy = cpfl_flow_destroy,
+	.flush = cpfl_flow_flush,
+	.query = cpfl_flow_query,
+};
+
+int
+cpfl_flow_init(struct cpfl_adapter_ext *ad)
+{
+	int ret;
+
+	if (ad->devargs.flow_parser[0] == '\0') {
+		PMD_INIT_LOG(WARNING, "flow module is not initialized");
+		return 0;
+	}
+
+	ret = cpfl_flow_engine_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init flow engines");
+		goto err;
+	}
+
+	ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to create flow parser");
+		goto err;
+	}
+
+	return ret;
+
+err:
+	cpfl_flow_engine_uninit(ad);
+	return ret;
+}
+
+void
+cpfl_flow_uninit(struct cpfl_adapter_ext *ad)
+{
+	if (ad->devargs.flow_parser[0] == '\0')
+		return;
+
+	cpfl_parser_destroy(ad->flow_parser);
+	cpfl_flow_engine_uninit(ad);
+}
diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h
new file mode 100644
index 0000000000..8c19b853ca
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FLOW_H_
+#define _CPFL_FLOW_H_
+
+#include <rte_flow.h>
+#include "cpfl_ethdev.h"
+
+#define CPFL_PREC_MAX 7
+
+extern const struct rte_flow_ops cpfl_flow_ops;
+
+enum cpfl_flow_engine_type {
+	CPFL_FLOW_ENGINE_NONE = 0,
+	CPFL_FLOW_ENGINE_FXP,
+};
+
+typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad);
+typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad);
+typedef int (*engine_create_t)(struct rte_eth_dev *dev,
+			       struct rte_flow *flow,
+			       void *meta,
+			       struct rte_flow_error *error);
+typedef int (*engine_destroy_t)(struct rte_eth_dev *dev,
+				struct rte_flow *flow,
+				struct rte_flow_error *error);
+typedef int (*engine_query_t)(struct rte_eth_dev *dev,
+			      struct rte_flow *flow,
+			      struct rte_flow_query_count *count,
+			      struct rte_flow_error *error);
+typedef void (*engine_free_t) (struct rte_flow *flow);
+typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev,
+					     const struct rte_flow_attr *attr,
+					     const struct rte_flow_item pattern[],
+					     const struct rte_flow_action actions[],
+					     void **meta);
+
+struct cpfl_flow_engine {
+	TAILQ_ENTRY(cpfl_flow_engine) node;
+	enum cpfl_flow_engine_type type;
+	engine_init_t init;
+	engine_uninit_t uninit;
+	engine_create_t create;
+	engine_destroy_t destroy;
+	engine_query_t query_count;
+	engine_free_t free;
+	engine_parse_pattern_action_t parse_pattern_action;
+};
+
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) next;
+	struct cpfl_flow_engine *engine;
+	void *rule;
+};
+
+void cpfl_flow_engine_register(struct cpfl_flow_engine *engine);
+struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev,
+						const struct rte_flow_attr *attr,
+						const struct rte_flow_item pattern[],
+						const struct rte_flow_action actions[],
+						void **meta);
+int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter);
+void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter);
+int cpfl_flow_init(struct cpfl_adapter_ext *ad);
+void cpfl_flow_uninit(struct cpfl_adapter_ext *ad);
+struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev,
+				  const struct rte_flow_attr *attr,
+				  const struct rte_flow_item pattern[],
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_error *error);
+int cpfl_flow_validate(struct rte_eth_dev *dev,
+		       const struct rte_flow_attr *attr,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       struct rte_flow_error *error);
+int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error);
+int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int cpfl_flow_query(struct rte_eth_dev *dev,
+		    struct rte_flow *flow,
+		    const struct rte_flow_action *actions,
+		    void *data,
+		    struct rte_flow_error *error);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index d767818eb7..f5654d5b0e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,7 @@ endif
 
 if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
+            'cpfl_flow.c',
             'cpfl_flow_parser.c',
     )
     ext_deps += jansson_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 4/9] net/cpfl: set up control path
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (2 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
                                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Set up a dedicated vport with 4 pairs of control queues for flow offloading.
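
For context, the DMA sizing contract enforced by
cpfl_check_dma_mem_parameters() in this patch amounts to roughly the
following when a config RX queue is prepared (a sketch only; how the
CP hands out the ring/buffer DMA regions is outside this snippet):

	struct cpfl_ctlq_create_info qinfo = { 0 };

	qinfo.type = IDPF_CTLQ_TYPE_CONFIG_RX;
	qinfo.len = CPFL_CFGQ_RING_LEN;
	qinfo.buf_size = CPFL_CTLQ_CFGQ_BUFFER_SIZE;
	/* descriptor ring: one idpf_ctlq_desc per ring entry */
	qinfo.ring_mem.size = qinfo.len * sizeof(struct idpf_ctlq_desc);
	/* RX buffers: one buf_size chunk per ring entry */
	qinfo.buf_mem.size = CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE;

cpfl_ctlq_alloc_ring_res() then records the CP-provided regions and,
for RX queues, splits the single buffer block into per-descriptor
idpf_dma_mem entries.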

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_controlq.c | 801 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  75 +++
 drivers/net/cpfl/cpfl_ethdev.c   | 270 +++++++++++
 drivers/net/cpfl/cpfl_ethdev.h   |  14 +
 drivers/net/cpfl/cpfl_vchnl.c    | 144 ++++++
 drivers/net/cpfl/meson.build     |   1 +
 6 files changed, 1305 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h

diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..4a925bc338
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that DMA parameter of each DMA memory struct is present and
+ * consistent with control queue parameters
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EINVAL;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EINVAL;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly or is inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers.
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  The CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down into multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		break;
+	default:
+		ret_code = -EINVAL;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	/* free the buffer pointer array itself; it was allocated above */
+	idpf_free(hw, cq->bi.rx_buff);
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int
+cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+	       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = 0;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = -ENOSPC;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+		desc->cookie_high =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
+		desc->cookie_low =
+			CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+				IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write
+ * back for the requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ * @force: (input) clean descriptors which were not done yet. Use with caution
+ * in kernel mode only
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+static int
+__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		     struct idpf_ctlq_msg *msg_status[], bool force)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t i = 0, num_to_clean;
+	uint16_t ntc, desc_err;
+	int ret = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*clean_count == 0)
+		return 0;
+	if (*clean_count > cq->ring_size)
+		return -EINVAL;
+
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		if (!msg_status[i])
+			break;
+		msg_status[i]->status = desc_err;
+		cq->bi.tx_msg[ntc] = NULL;
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int
+cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+		   struct idpf_ctlq_msg *msg_status[])
+{
+	return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false);
+}
+
+/**
+ * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int
+cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	uint16_t ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	uint16_t tbp = ntp + 1;
+	int status = 0;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return -EINVAL;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+	idpf_acquire_lock(&cq->cq_lock);
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * cpfl_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int
+cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+	       struct idpf_ctlq_msg *q_msg)
+{
+	uint16_t num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = 0;
+	uint16_t i = 0;
+
+	if (!cq || !cq->ring_size)
+		return -ENOBUFS;
+
+	if (*num_q_msg == 0)
+		return 0;
+	else if (*num_q_msg > cq->ring_size)
+		return -EINVAL;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+	ntc = cq->next_to_clean;
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+		q_msg[i].vmvf_type = (flags &
+				     (IDPF_CTLQ_FLAG_FTYPE_VM |
+				      IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = -EBADMSG;
+
+		q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
+		q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+	idpf_release_lock(&cq->cq_lock);
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = -ENOMSG;
+
+	return ret_code;
+}
+
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
+
+int
+cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		     uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg,
+		     struct idpf_ctlq_msg q_msg[])
+{
+	return cpfl_ctlq_recv(cq, num_q_msg, q_msg);
+}
+
+int
+cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			      uint16_t *buff_count, struct idpf_dma_mem **buffs)
+{
+	return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs);
+}
+
+int
+cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count,
+			 struct idpf_ctlq_msg *msg_status[])
+{
+	return cpfl_ctlq_clean_sq(cq, clean_count, msg_status);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..740ae6522c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* CRQ/CSQ specific error codes */
+#define CPFL_ERR_CTLQ_ERROR             -74     /* -EBADMSG */
+#define CPFL_ERR_CTLQ_TIMEOUT           -110    /* -ETIMEDOUT */
+#define CPFL_ERR_CTLQ_FULL              -28     /* -ENOSPC */
+#define CPFL_ERR_CTLQ_NO_WORK           -42     /* -ENOMSG */
+#define CPFL_ERR_CTLQ_EMPTY             -105    /* -ENOBUFS */
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFLib will piece
+	 * into individual buffers for each descriptor.
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			 u16 num_q_msg, struct idpf_ctlq_msg q_msg[]);
+int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+			 struct idpf_ctlq_msg q_msg[]);
+
+int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				  u16 *buff_count, struct idpf_dma_mem **buffs);
+int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+			     struct idpf_ctlq_msg *msg_status[]);
+#endif
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c350728861..a2bc6784d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1698,6 +1698,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
+	/* ignore if it is ctrl vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+		return;
+
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
 	if (!vport) {
 		PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1893,6 +1897,262 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (adapter->ctlqp[i])
+			cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cfg_cq = NULL;
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2060,6 +2320,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 #ifdef RTE_HAS_JANSSON
 	ret = cpfl_flow_init(adapter);
 	if (ret) {
@@ -2076,7 +2342,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 #ifdef RTE_HAS_JANSSON
 err_flow_init:
+	cpfl_ctrl_path_close(adapter);
 #endif
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_allowlist_uninit(adapter);
@@ -2315,6 +2584,7 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 #ifdef RTE_HAS_JANSSON
 	cpfl_flow_uninit(adapter);
 #endif
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 69bf32cfbd..7f83d170d7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -82,6 +83,10 @@
 #define CPFL_META_CHUNK_LENGTH	1024
 #define CPFL_META_LENGTH	32
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 /* bit[15:14] type
  * bit[13] host/accelerator core
  * bit[12] apf/cpf
@@ -212,6 +217,12 @@ struct cpfl_adapter_ext {
 	struct cpfl_flow_js_parser *flow_parser;
 
 	struct cpfl_metadata meta;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
@@ -226,6 +237,9 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..7d277a0e8e 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+	       IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index f5654d5b0e..290ff1e655 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'cpfl_rxtx.c',
         'cpfl_vchnl.c',
         'cpfl_representor.c',
+        'cpfl_controlq.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 5/9] net/cpfl: add FXP low level implementation
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (3 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
                                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add low-level helper functions for the CPFL PMD to create and delete
rules on the IPU's Flexible Packet Processor (FXP).
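
The action helpers added below pack each FXP action into a 32-bit word made of
a 3-bit precedence, an opcode field, an index and a value. A self-contained
sketch of the 16-bit action packing (the mask helper mirrors CPFL_MAKE_MASK32;
the SET_VSI index and VSI value are made-up examples, not from this patch):

#include <stdio.h>
#include <stdint.h>

/* mirrors CPFL_MAKE_MASK32 from cpfl_actions.h */
#define MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))

static uint32_t make_16b_act(uint32_t prec, uint32_t idx, uint32_t val)
{
	/* precedence in bits 31:29, 16-bit opcode in bits 28:26,
	 * index in bits 19:16, value in bits 15:0
	 */
	return ((prec << 29) & MAKE_MASK32(3, 29)) |
	       ((uint32_t)1 << 26) |
	       ((idx << 16) & MAKE_MASK32(4, 16)) |
	       (val & MAKE_MASK32(16, 0));
}

int main(void)
{
	/* hypothetical: SET_VSI (index 2) forwarding to VSI 5 at precedence 7 */
	printf("action word = 0x%08x\n", (unsigned int)make_16b_act(7, 2, 5));
	return 0;
}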

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h | 858 ++++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rules.c   | 127 +++++
 drivers/net/cpfl/cpfl_rules.h   | 306 ++++++++++++
 drivers/net/cpfl/meson.build    |   1 +
 4 files changed, 1292 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently use only an INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base actions
+ * when the HAS is finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structure with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and require an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for the SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint as to whether
+ * or not an accompanying MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select the one with the higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of two action
+ * sets: the chained AUX action set comes first, and the base/parent action
+ * set comes second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
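
For readers new to this header: each inline factory above returns a single 32-bit
action word, and callers are expected to collect those words into an array that
later becomes a rule's act_bytes (the FXP flow engine later in this series does
exactly that). A minimal sketch, assuming only the helpers declared above; the
function name, the precedence value 1, and the parameter choices are illustrative:

#include <stdint.h>
#include "cpfl_actions.h"

/* Sketch only: build a two-entry action list that forwards to a single VSI
 * and commits all actions.  Invalid parameters degrade to NOP words via
 * cpfl_act_nop(), as the helpers above document.
 */
static int
example_build_act_set(union cpfl_action_set acts[], int max,
		      enum cpfl_prot_eng pe, uint16_t vsi)
{
	int n = 0;

	if (max < 2)
		return -1;
	/* Forward to a single VSI from SET_VSI slot 0 at precedence 1 */
	acts[n++] = cpfl_act_fwd_vsi(0, 1, pe, vsi);
	/* Commit all actions (see enum cpfl_act_commit_mode above) */
	acts[n++] = cpfl_act_set_commit_mode(1, CPFL_ACT_COMMIT_ALL);
	return n;
}
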
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..3d259d3da8
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get the common bit context for a descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get bit context for descriptor
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ *
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * note: call this function before sending the rule to HW via the fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
+	rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
+}
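
Taken together, the helpers above are meant to be called in a fixed order when
packing a SEM rule: build the rule blob in the DMA buffer first, then fill the
common config data and prepare the descriptor from it. A condensed sketch,
assuming the declarations from cpfl_rules.h below; the function name and the
zero defaults (port_num, host_id, resp_req, etc.) are simplifications, and the
full version is added later in this series as cpfl_default_rule_pack():

#include "cpfl_rules.h"

/* Sketch: pack one SEM add-rule into a control queue message.  Assumes the
 * DMA buffer was allocated by the caller and key/acts lengths are valid.
 */
static int
example_pack_sem_add(const uint8_t *key, uint8_t key_len,
		     const uint8_t *acts, uint8_t act_len,
		     uint16_t prof_id, uint64_t cookie, uint16_t vsi,
		     struct idpf_dma_mem *dma, struct idpf_ctlq_msg *msg)
{
	union cpfl_rule_cfg_pkt_record *blob = dma->va;
	struct cpfl_rule_cfg_data cfg = {0};
	uint16_t cfg_ctrl;

	if (!blob)
		return -1;
	/* 1. build the SEM rule blob in the DMA buffer */
	cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, 0, 0, 0);
	cpfl_prep_sem_rule_blob(key, key_len, acts, act_len, cfg_ctrl, blob);
	/* 2. fill the common config, then 3. prepare the descriptor */
	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_sem_add_rule, cookie, vsi,
				       0, 0, 0, 0, 0, 0,
				       sizeof(*blob), dma, &cfg.common);
	cpfl_prep_rule_desc(&cfg, msg);
	return 0;
}
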
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table */
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * Macros to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameters.
+ */
+/* build SEM CFG_CTRL */
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL */
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format; the rest of the
+ * members are in little endian.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending a rule to HW, the caller needs to fill
+ *       in this struct and then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calling cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 290ff1e655..e2b6621cea 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -19,6 +19,7 @@ sources = files(
         'cpfl_vchnl.c',
         'cpfl_representor.c',
         'cpfl_controlq.c',
+        'cpfl_rules.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (4 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
                                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add a new module that implements FXP rule creation and destruction.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 263 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 ++++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 369 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..ea65e20507
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (retries > CTLQ_SEND_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		/* TODO - process rx controlq message */
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not post recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "rule type %d is not supported.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
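
A short usage sketch of the API above, assuming a rinfo array has already been
packed by a parser (as the FXP flow engine in the next patch does) and that the
per-vport control queue pair follows the even/odd ctlqp indexing convention used
there; the function name and the parameter names are illustrative:

#include "cpfl_ethdev.h"
#include "cpfl_fxp_rule.h"

/* Sketch: push an already-populated rule array through a vport's control
 * queue pair.  cpq_id selection (devarg_id * 2) mirrors the FXP engine:
 * the even index carries the request, the odd index returns the completion.
 */
static int
example_apply_rules(struct cpfl_itf *itf, struct cpfl_adapter_ext *ad,
		    struct cpfl_vport *vp, struct cpfl_rule_info *rinfo,
		    int rule_num, bool add)
{
	uint32_t cpq_id = vp->base.devarg_id * 2;

	return cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
				 rinfo, rule_num, add);
}
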
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (5 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
                                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Adapt FXP implementation to a flow engine

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 doc/guides/nics/cpfl.rst                |  18 +-
 doc/guides/rel_notes/release_23_11.rst  |   1 +
 drivers/net/cpfl/cpfl_ethdev.h          |  27 ++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 582 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 5 files changed, 627 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index e17347d15c..ae5487f2f6 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -197,8 +197,22 @@ low level hardware resources.
 
     * For Ubuntu, it can be installed using `apt install libjansson-dev`
 
-- run testpmd with the json file
+- run testpmd with the json file and create two vports
 
    .. code-block:: console
 
-   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0],flow_parser="refpkg.json" -- -i
+   dpdk-testpmd -c 0x3 -n 4 -a 0000:af:00.6,vport=[0-1],flow_parser="refpkg.json" -- -i
+
+#. Create one flow to forward ETH-IPV4-TCP from an I/O port to a local (CPF's) vport. The flow should be created
+   on vport X. Group M should match the FXP module. The port_representor action with port ID Y forwards the packet to local vport Y::
+
+   .. code-block:: console
+
+   flow create X ingress group M pattern eth dst is 00:01:00:00:03:14 / ipv4 src is 192.168.0.1 \
+   dst is 192.168.0.2 / tcp / end actions port_representor port_id Y / end
+
+#. Send a matching packet, and it should be received and displayed by the PMD::
+
+   .. code-block:: console
+
+   sendp(Ether(dst='00:01:00:00:03:14')/IP(src='192.168.0.1',dst='192.168.0.2')/TCP(),iface="ens25f0")
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 8536ce88f4..16cdd674d3 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -85,6 +85,7 @@ New Features
 * **Updated Intel cpfl driver.**
 
   * Added support for port representor.
+  * Added support for rte_flow.
 
 * **Updated Intel iavf driver.**
   * Added support for iavf auto-reset.
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8eeeac9910..efb0eb5251 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -85,6 +85,8 @@
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX	0
+#define CPFL_FPCP_CFGQ_RX	1
 #define CPFL_CFGQ_NUM		8
 
 /* bit[15:14] type
@@ -219,6 +221,8 @@ struct cpfl_adapter_ext {
 	struct rte_hash *repr_allowlist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	struct cpfl_metadata meta;
 
@@ -312,4 +316,27 @@ cpfl_get_vsi_id(struct cpfl_itf *itf)
 	return CPFL_INVALID_HW_ID;
 }
 
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data is not allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..4c7b4deb7a
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include <ethdev_driver.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+
+#define CPFL_COOKIE_DEF		0x1000
+#define CPFL_MOD_COOKIE_DEF	0x1237561
+#define CPFL_PREC_DEF		1
+#define CPFL_PREC_SET		5
+#define CPFL_TYPE_ID		3
+#define CPFL_OFFSET		0x0a
+#define CPFL_HOST_ID_DEF	0
+#define CPFL_PF_NUM_DEF		0
+#define CPFL_PORT_NUM_DEF	0
+#define CPFL_RESP_REQ_DEF	2
+#define CPFL_PIN_TO_CACHE_DEF	0
+#define CPFL_CLEAR_MIRROR_1ST_STATE_DEF	0
+#define CPFL_FIXED_FETCH_DEF	0
+#define CPFL_PTI_DEF		0
+#define CPFL_MOD_OBJ_SIZE_DEF	0
+#define CPFL_PIN_MOD_CONTENT_DEF	0
+
+#define CPFL_MAX_MOD_CONTENT_INDEX	256
+#define CPFL_MAX_MR_ACTION_NUM	8
+
+/* Struct used when parsing detailed rule information with the json file */
+struct cpfl_rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;	/* json action field of pattern rule */
+	uint32_t pr_num;			/* number of pattern rules */
+	uint32_t mr_num;			/* number of modification rules */
+	uint32_t rule_num;			/* number of all rules */
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t cpfl_rule_cookie = CPFL_COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		/* Every vport has one pair of control queues configured to handle messages.
+		 * The even index is the Tx queue and the odd index is the Rx queue.
+		 */
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+				rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter failed to create flow");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct cpfl_rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow created by cpfl filter");
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		ret = -rte_errno;
+		goto err;
+	}
+
+	ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+				rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		cpfl_fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct cpfl_rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(CPFL_PREC_DEF,
+					mr_action->mod.prof,
+					CPFL_PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = cpfl_fxp_mod_idx_alloc(adapter);
+	if (mod_idx == CPFL_MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(CPFL_PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = CPFL_MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = CPFL_PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = CPFL_MOD_COOKIE_DEF;
+	mod_rinfo->port_num = CPFL_PORT_NUM_DEF;
+	mod_rinfo->resp_req = CPFL_RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+#define CPFL_FXP_MAX_QREGION_SIZE 128
+#define CPFL_INVALID_QUEUE_ID -2
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct cpfl_rule_info_meta *rim,
+		      int priority,
+		      int index)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is PORT_REPRESENTOR type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /* vsi id */
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = CPFL_PREC_MAX - priority;
+	for (action = actions; action->type !=
+	     RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi)
+				fwd_vsi = true;
+			else
+				goto err;
+
+			act_ethdev = action->conf;
+			dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = CPFL_INVALID_QUEUE_ID;
+			}
+
+			dev_id = cpfl_get_vsi_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= CPFL_FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == CPFL_INVALID_QUEUE_ID)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (cpfl_rule_cookie == ~0llu)
+		cpfl_rule_cookie = CPFL_COOKIE_DEF;
+	rinfo->cookie = cpfl_rule_cookie++;
+	rinfo->host_id = CPFL_HOST_ID_DEF;
+	rinfo->port_num = CPFL_PORT_NUM_DEF;
+	rinfo->resp_req = CPFL_RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CPFL_CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool
+cpfl_is_mod_action(const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[CPFL_MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0;
+	uint32_t mr_num = 0;
+	struct cpfl_rule_info_meta *rim;
+	int ret;
+
+	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (cpfl_is_mod_action(actions)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct cpfl_rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid pattern");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid action");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int
+cpfl_fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(CPFL_MAX_MOD_CONTENT_INDEX);
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represents a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(CPFL_MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void
+cpfl_fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
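+/* Pick a free mod content index: the first set bit in the bitmap marks a
+ * free slot; clear it to mark the slot in use.
+ */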
+static uint32_t
+cpfl_fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return CPFL_MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void
+cpfl_fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	cpfl_fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = cpfl_fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 6118a16329..5fd1cbd045 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -46,6 +46,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
 	    'cpfl_fxp_rule.c',
+	    'cpfl_flow_engine_fxp.c',
     )
     ext_deps += jansson_dep
 endif
-- 
2.34.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 8/9] net/cpfl: support flow ops on representor
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (6 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-09  4:00                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
  2023-10-10  1:31                 ` [PATCH v10 0/9] add rte flow support for cpfl Zhang, Qi Z
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Add flow ops support for representors, so that a representor can
create, destroy, validate and flush rules.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 74 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 4c7b4deb7a..7a3376f9f6 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -72,6 +72,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 	struct cpfl_adapter_ext *ad = itf->adapter;
 	struct cpfl_rule_info_meta *rim = meta;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	if (!rim)
 		return ret;
@@ -82,6 +83,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
 		 * Even index is tx queue and odd index is rx queue.
 		 */
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -121,6 +126,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	uint32_t i;
 	struct cpfl_vport *vport;
+	struct cpfl_repr *repr;
 
 	rim = flow->rule;
 	if (!rim) {
@@ -134,6 +140,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
 	if (itf->type == CPFL_ITF_TYPE_VPORT) {
 		vport = (struct cpfl_vport *)itf;
 		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		repr = (struct cpfl_repr *)itf;
+		cpq_id = ((repr->repr_id.pf_id  + repr->repr_id.vf_id) &
+			  (CPFL_TX_CFGQ_NUM - 1)) * 2;
 	} else {
 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 				   "fail to find correct control queue");
@@ -413,6 +423,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
 	return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+			   const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	const struct rte_flow_action_ethdev *ethdev;
+	struct cpfl_itf *target_itf;
+	bool ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		ret = cpfl_metadata_write_port_id(itf);
+		if (!ret) {
+			PMD_DRV_LOG(ERR, "fail to write port id");
+			return false;
+		}
+	}
+
+	ret = cpfl_metadata_write_sourcevsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write source vsi id");
+		return false;
+	}
+
+	ret = cpfl_metadata_write_vsi(itf);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "fail to write vsi id");
+		return false;
+	}
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+			target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+			if (!target_itf) {
+				PMD_DRV_LOG(ERR, "fail to get target_itf by port id");
+				return false;
+			}
+			ret = cpfl_metadata_write_targetvsi(target_itf);
+			if (!ret) {
+				PMD_DRV_LOG(ERR, "fail to write target vsi id");
+				return false;
+			}
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -429,6 +497,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
 	struct cpfl_rule_info_meta *rim;
 	int ret;
 
+	ret = cpfl_fxp_get_metadata_port(itf, actions);
+	if (!ret) {
+		PMD_DRV_LOG(ERR, "Fail to save metadata.");
+		return -EINVAL;
+	}
+
 	ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
 	return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+			   const struct rte_flow_ops **ops)
+{
+	if (!dev)
+		return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+	*ops = &cpfl_flow_ops;
+#else
+	*ops = NULL;
+	PMD_DRV_LOG(NOTICE, "rte_flow is not supported; please install the jansson library.");
+#endif
+	return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.dev_start		= cpfl_repr_dev_start,
 	.dev_stop		= cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.tx_queue_setup		= cpfl_repr_tx_queue_setup,
 
 	.link_update		= cpfl_repr_link_update,
+	.flow_ops_get		= cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
 	struct cpfl_repr_param *param = init_param;
 	struct cpfl_adapter_ext *adapter = param->adapter;
+	int ret;
 
 	repr->repr_id = param->repr_id;
 	repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
 		repr->func_up = true;
 
+	TAILQ_INIT(&repr->itf.flow_list);
+	memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+	memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		return ret;
+
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1
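
A minimal sketch of how an application might drive these ops through a
representor port, assuming attr, pattern[] and actions[] are already built
and repr_port holds the representor's ethdev port id:

    #include <rte_flow.h>

    struct rte_flow_error err;
    struct rte_flow *flow = NULL;

    /* Validate, then create the rule via the representor port. */
    if (rte_flow_validate(repr_port, &attr, pattern, actions, &err) == 0)
            flow = rte_flow_create(repr_port, &attr, pattern, actions, &err);

    /* Destroy one rule, or flush every rule owned by the port. */
    if (flow)
            rte_flow_destroy(repr_port, flow, &err);
    rte_flow_flush(repr_port, &err);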


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [PATCH v10 9/9] net/cpfl: support represented port action
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (7 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
@ 2023-10-09  4:00                 ` Zhang, Yuying
  2023-10-10  1:31                 ` [PATCH v10 0/9] add rte flow support for cpfl Zhang, Qi Z
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Yuying @ 2023-10-09  4:00 UTC (permalink / raw)
  To: yuying.zhang, dev, qi.z.zhang, jingjing.wu, beilei.xing

From: Yuying Zhang <yuying.zhang@intel.com>

Support the RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT action for forwarding
packets to APF/CPF/VF representors.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 7a3376f9f6..ddede2f553 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -266,6 +266,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 	int queue_id = -1;
 	bool fwd_vsi = false;
 	bool fwd_q = false;
+	bool is_vsi;
 	uint32_t i;
 	struct cpfl_rule_info *rinfo = &rim->rules[index];
 	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
@@ -276,6 +277,7 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 		action_type = action->type;
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
 			if (!fwd_vsi)
 				fwd_vsi = true;
 			else
@@ -294,12 +296,20 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf,
 				queue_id = CPFL_INVALID_QUEUE_ID;
 			}
 
-			dev_id = cpfl_get_vsi_id(dst_itf);
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				  dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
 
 			if (dev_id == CPFL_INVALID_HW_ID)
 				goto err;
 
-			*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			if (is_vsi)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
 			act_set++;
 			rinfo->act_byte_len += sizeof(union cpfl_action_set);
 			break;
-- 
2.34.1
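
A sketch of the action list an application could pass to request this
forwarding; vf_repr_port is a placeholder for the target representor's
ethdev port id, and the match pattern is omitted:

    #include <rte_flow.h>

    struct rte_flow_action_ethdev port_conf = { .port_id = vf_repr_port };
    struct rte_flow_action actions[] = {
            {
                    .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
                    .conf = &port_conf,
            },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };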


^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v10 0/9] add rte flow support for cpfl
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
                                   ` (8 preceding siblings ...)
  2023-10-09  4:00                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
@ 2023-10-10  1:31                 ` Zhang, Qi Z
  9 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-10-10  1:31 UTC (permalink / raw)
  To: Zhang, Yuying, dev, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Monday, October 9, 2023 12:00 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v10 0/9] add rte flow support for cpfl
> 
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> This patchset adds rte flow support for the cpfl driver.
> It depends on the following patch set:
> http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-
> beilei.xing@intel.com/
> 
> Wenjing Qiao (2):
>   net/cpfl: parse flow offloading hint from JSON
>   net/cpfl: build action mapping rules from JSON
> 
> Yuying Zhang (7):
>   net/cpfl: set up flow offloading skeleton
>   net/cpfl: set up control path
>   net/cpfl: add FXP low level implementation
>   net/cpfl: implement FXP rule creation and destroying
>   net/cpfl: adapt FXP to flow engine
>   net/cpfl: support flow ops on representor
>   net/cpfl: support represented port action
> ---
> v10:
> * fix ci build issue
> 
> v9:
> * refine rx queue message process function
> 
> v8:
> * fix compile issues
> * refine document and separate patch with different features
> 
> v7:
> * refine commit log
> * fix compile issues
> 
> v6:
> * use existing jansson instead of the json-c library
> * refine "add FXP low level implementation"
> 
> V5:
> * Add input validation for some functions
> 
>  doc/guides/nics/cpfl.rst                |   52 +
>  doc/guides/rel_notes/release_23_11.rst  |    1 +
>  drivers/net/cpfl/cpfl_actions.h         |  858 +++++++++++
>  drivers/net/cpfl/cpfl_controlq.c        |  801 ++++++++++
>  drivers/net/cpfl/cpfl_controlq.h        |   75 +
>  drivers/net/cpfl/cpfl_ethdev.c          |  392 ++++-
>  drivers/net/cpfl/cpfl_ethdev.h          |  128 ++
>  drivers/net/cpfl/cpfl_flow.c            |  339 +++++
>  drivers/net/cpfl/cpfl_flow.h            |   85 ++
>  drivers/net/cpfl/cpfl_flow_engine_fxp.c |  666 ++++++++
>  drivers/net/cpfl/cpfl_flow_parser.c     | 1835 +++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow_parser.h     |  268 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.c        |  263 ++++
>  drivers/net/cpfl/cpfl_fxp_rule.h        |   68 +
>  drivers/net/cpfl/cpfl_representor.c     |   29 +
>  drivers/net/cpfl/cpfl_rules.c           |  127 ++
>  drivers/net/cpfl/cpfl_rules.h           |  306 ++++
>  drivers/net/cpfl/cpfl_vchnl.c           |  144 ++
>  drivers/net/cpfl/meson.build            |   12 +
>  19 files changed, 6448 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/cpfl/cpfl_actions.h  create mode 100644
> drivers/net/cpfl/cpfl_controlq.c  create mode 100644
> drivers/net/cpfl/cpfl_controlq.h  create mode 100644
> drivers/net/cpfl/cpfl_flow.c  create mode 100644 drivers/net/cpfl/cpfl_flow.h
> create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h  create mode 100644
> drivers/net/cpfl/cpfl_rules.c  create mode 100644 drivers/net/cpfl/cpfl_rules.h
> 
> --
> 2.34.1

Applied to dpdk-next-net-intel after reverting the old version.

Thanks
Qi

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [PATCH v9 0/9] add rte flow support for cpfl
  2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
                                 ` (12 preceding siblings ...)
  2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
@ 2023-10-15 11:21               ` Thomas Monjalon
  13 siblings, 0 replies; 128+ messages in thread
From: Thomas Monjalon @ 2023-10-15 11:21 UTC (permalink / raw)
  To: yuying.zhang, qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, yuying.zhang

28/09/2023 10:44, yuying.zhang@intel.com:
> From: Yuying Zhang <yuying.zhang@intel.com>
> 
> This patchset adds rte flow support for the cpfl driver.
> It depends on the following patch set:
> http://patchwork.dpdk.org/project/dpdk/cover/20230912173039.1612287-1-beilei.xing@intel.com/
> 
> Wenjing Qiao (2):
>   net/cpfl: add json parser for rte flow pattern rules
>   net/cpfl: build action mapping rules from JSON
> 
> Yuying Zhang (7):
>   net/cpfl: set up rte flow skeleton
>   net/cpfl: set up control path
>   net/cpfl: add FXP low level implementation
>   net/cpfl: add fxp rule module
>   net/cpfl: add fxp flow engine
>   net/cpfl: add flow support for representor
>   net/cpfl: add support of to represented port action

You did not update the feature list in doc/guides/nics/features/cpfl.ini

You could have generated it with this command:
devtools/parse-flow-support.sh drivers/net/cpfl

This omission is caught by the script devtools/check-doc-vs-code.sh
Please run it.
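
For reference, the generated file follows the usual feature-file layout; a
rough sketch of the rte_flow actions section, based only on the actions this
series handles (the authoritative content comes from the script above):

    [rte_flow actions]
    drop             = Y
    port_representor = Y
    queue            = Y
    represented_port = Y
    rss              = Y
    vxlan_decap      = Y
    vxlan_encap      = Y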



^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [PATCH v9 3/9] net/cpfl: set up rte flow skeleton
  2023-09-28  8:44               ` [PATCH v9 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
@ 2023-10-15 13:01                 ` Thomas Monjalon
  2023-10-16  3:07                   ` Zhang, Qi Z
  0 siblings, 1 reply; 128+ messages in thread
From: Thomas Monjalon @ 2023-10-15 13:01 UTC (permalink / raw)
  To: qi.z.zhang
  Cc: yuying.zhang, dev, jingjing.wu, beilei.xing, yuying.zhang,
	david.marchand

28/09/2023 10:44, yuying.zhang@intel.com:
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_flow.h
> @@ -0,0 +1,85 @@
> +/* SPDX-Lidpfnse-Identifier: BSD-3-Clause

Are you sure?

This typo has been caught by devtools/check-spdx-tag.sh

Please run the scripts in devtools.
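
For clarity, the tag the file presumably meant to carry is the standard one:

    /* SPDX-License-Identifier: BSD-3-Clause */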



^ permalink raw reply	[flat|nested] 128+ messages in thread

* RE: [PATCH v9 3/9] net/cpfl: set up rte flow skeleton
  2023-10-15 13:01                 ` Thomas Monjalon
@ 2023-10-16  3:07                   ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2023-10-16  3:07 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Zhang, Yuying, dev, Wu, Jingjing, Xing, Beilei, Zhang, Yuying,
	david.marchand



> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Sunday, October 15, 2023 9:02 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>; dev@dpdk.org; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, Yuying
> <yuying.zhang@intel.com>; david.marchand@redhat.com
> Subject: Re: [PATCH v9 3/9] net/cpfl: set up rte flow skeleton
> 
> 28/09/2023 10:44, yuying.zhang@intel.com:
> > --- /dev/null
> > +++ b/drivers/net/cpfl/cpfl_flow.h
> > @@ -0,0 +1,85 @@
> > +/* SPDX-Lidpfnse-Identifier: BSD-3-Clause
> 
> Are you sure?
> 
> This typo has been caught by devtools/check-spdx-tag.sh
> 
> Please run the scripts in devtools.

OK, added to my checklist.

> 


^ permalink raw reply	[flat|nested] 128+ messages in thread

end of thread, other threads:[~2023-10-16  3:07 UTC | newest]

Thread overview: 128+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-12  7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
2023-08-12  7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
2023-08-25  3:55   ` Xing, Beilei
2023-08-12  7:55 ` [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Yuying Zhang
2023-08-25  5:55   ` Xing, Beilei
2023-08-12  7:55 ` [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Yuying Zhang
2023-08-25  6:23   ` Xing, Beilei
2023-08-12  7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
2023-08-25  7:35   ` Xing, Beilei
2023-08-25  8:42   ` Xing, Beilei
2023-08-12  7:55 ` [PATCH v1 5/5] net/cpfl: add fxp flow engine Yuying Zhang
2023-08-25  9:15   ` Xing, Beilei
2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 2/8] net/cpfl: add flow json parser Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 3/8] net/cpfl: add FXP low level implementation Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 4/8] net/cpfl: setup ctrl path Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 5/8] net/cpfl: set up rte flow skeleton Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 6/8] net/cpfl: add fxp rule module Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 7/8] net/cpfl: add fxp flow engine Yuying Zhang
2023-09-01 11:31   ` [PATCH v2 8/8] net/cpfl: add flow support for representor Yuying Zhang
2023-09-06  9:33   ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
2023-08-15 16:50     ` [PATCH v4 " Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-09-15 15:11         ` Stephen Hemminger
2023-08-15 16:50       ` [PATCH v4 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 5/9] net/cpfl: add fxp rule module Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 7/9] net/cpfl: add flow support for representor Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 8/9] app/test-pmd: refine encap content Zhang, Yuying
2023-08-15 16:50       ` [PATCH v4 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
2023-09-06  9:33     ` [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs Wenjing Qiao
2023-09-11  0:48       ` Wu, Jingjing
2023-09-06  9:34     ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
2023-09-08  6:26       ` Liu, Mingxia
2023-09-11  6:24       ` Wu, Jingjing
2023-09-06  9:34     ` [PATCH v3 3/9] net/cpfl: add FXP low level implementation Wenjing Qiao
2023-09-06  9:34     ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
2023-09-11  6:30       ` Liu, Mingxia
2023-09-11  6:36       ` Wu, Jingjing
2023-09-06  9:34     ` [PATCH v3 5/9] net/cpfl: set up rte flow skeleton Wenjing Qiao
2023-09-06  9:34     ` [PATCH v3 6/9] net/cpfl: add fxp rule module Wenjing Qiao
2023-09-12  7:40       ` FW: " Liu, Mingxia
2023-09-06  9:34     ` [PATCH v3 7/9] net/cpfl: add fxp flow engine Wenjing Qiao
2023-09-06  9:34     ` [PATCH v3 8/9] net/cpfl: add flow support for representor Wenjing Qiao
2023-09-06  9:34     ` [PATCH v3 9/9] app/test-pmd: refine encap content Wenjing Qiao
2023-09-15 10:00     ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
2023-08-22  1:02       ` [PATCH v6 0/8] " Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 2/8] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 3/8] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 4/8] net/cpfl: set up control path Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 5/8] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 6/8] net/cpfl: add fxp rule module Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 7/8] net/cpfl: add fxp flow engine Zhang, Yuying
2023-08-22  1:02         ` [PATCH v6 8/8] net/cpfl: add flow support for representor Zhang, Yuying
2023-09-26 18:16         ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
2023-09-26 18:16           ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-26 19:03             ` Stephen Hemminger
2023-09-27  1:21               ` Zhang, Qi Z
2023-09-26 18:16           ` [PATCH v7 2/8] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-26 18:16           ` [PATCH v7 3/8] net/cpfl: set up rte flow skeleton yuying.zhang
2023-09-26 18:16           ` [PATCH v7 4/8] net/cpfl: set up control path yuying.zhang
2023-09-26 18:17           ` [PATCH v7 5/8] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-26 18:17           ` [PATCH v7 6/8] net/cpfl: add fxp rule module yuying.zhang
2023-09-28  3:29             ` Zhang, Qi Z
2023-09-26 18:17           ` [PATCH v7 7/8] net/cpfl: add fxp flow engine yuying.zhang
2023-09-26 18:17           ` [PATCH v7 8/8] net/cpfl: add flow support for representor yuying.zhang
2023-09-27 12:54           ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
2023-09-27 12:54             ` [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-27 12:54             ` [PATCH v8 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-27 12:54             ` [PATCH v8 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
2023-09-27 12:54             ` [PATCH v8 4/9] net/cpfl: set up control path yuying.zhang
2023-09-27 12:54             ` [PATCH v8 5/9] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-27 12:54             ` [PATCH v8 6/9] net/cpfl: add fxp rule module yuying.zhang
2023-09-27 12:54             ` [PATCH v8 7/9] net/cpfl: add fxp flow engine yuying.zhang
2023-09-27 12:54             ` [PATCH v8 8/9] net/cpfl: add flow support for representor yuying.zhang
2023-09-27 12:54             ` [PATCH v8 9/9] net/cpfl: add support of to represented port action yuying.zhang
2023-09-28  3:37             ` [PATCH v8 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-09-28  8:44             ` [PATCH v9 " yuying.zhang
2023-09-08 16:05               ` [PATCH v10 " Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
2023-09-08 16:05                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
2023-09-28  8:44               ` [PATCH v9 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-28  8:44               ` [PATCH v9 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-28  8:44               ` [PATCH v9 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
2023-10-15 13:01                 ` Thomas Monjalon
2023-10-16  3:07                   ` Zhang, Qi Z
2023-09-28  8:44               ` [PATCH v9 4/9] net/cpfl: set up control path yuying.zhang
2023-09-28  8:44               ` [PATCH v9 5/9] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-28  8:44               ` [PATCH v9 6/9] net/cpfl: add fxp rule module yuying.zhang
2023-09-28  8:44               ` [PATCH v9 7/9] net/cpfl: add fxp flow engine yuying.zhang
2023-09-28  8:44               ` [PATCH v9 8/9] net/cpfl: add flow support for representor yuying.zhang
2023-09-28  8:44               ` [PATCH v9 9/9] net/cpfl: add support of to represented port action yuying.zhang
2023-09-28 12:45               ` [PATCH v9 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-09-28 16:04               ` Stephen Hemminger
2023-10-09  4:00               ` [PATCH v10 " Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
2023-10-09  4:00                 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
2023-10-10  1:31                 ` [PATCH v10 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-10-15 11:21               ` [PATCH v9 " Thomas Monjalon
2023-09-15 10:00       ` [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-09-15 11:14         ` Zhang, Qi Z
2023-09-15 10:00       ` [PATCH v5 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-09-15 11:19         ` Zhang, Qi Z
2023-09-15 10:00       ` [PATCH v5 5/9] net/cpfl: add fxp rule module Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 7/9] net/cpfl: add flow support for representor Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 8/9] app/test-pmd: refine encap content Zhang, Yuying
2023-09-15 10:00       ` [PATCH v5 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
