DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 0/4] net/cpfl: add basic support for rte_flow
@ 2023-08-11  9:30 Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11  9:30 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

This patchset adds basic support for rte_flow.
---
Depends-on: series-29139 ("net/cpfl: support port representor")

Wenjing Qiao (4):
  net/cpfl: parse flow parser file in devargs
  net/cpfl: add flow json parser
  net/cpfl: introduce CPF common library
  net/cpfl: setup ctrl path

 drivers/net/cpfl/cpfl_actions.h     |  858 +++++++++++++
 drivers/net/cpfl/cpfl_controlq.c    |  380 ++++++
 drivers/net/cpfl/cpfl_controlq.h    |   51 +
 drivers/net/cpfl/cpfl_ethdev.c      |  300 ++++-
 drivers/net/cpfl/cpfl_ethdev.h      |   17 +
 drivers/net/cpfl/cpfl_flow_parser.c | 1758 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  205 ++++
 drivers/net/cpfl/cpfl_rules.c       |  126 ++
 drivers/net/cpfl/cpfl_rules.h       |  306 +++++
 drivers/net/cpfl/cpfl_vchnl.c       |  144 +++
 drivers/net/cpfl/meson.build        |   11 +
 11 files changed, 4152 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 1/4] net/cpfl: parse flow parser file in devargs
  2023-08-11  9:30 [PATCH 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
@ 2023-08-11  9:30 ` Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
                     ` (4 more replies)
  2023-08-11  9:30 ` [PATCH 2/4] net/cpfl: add flow json parser Wenjing Qiao
                   ` (2 subsequent siblings)
  3 siblings, 5 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11  9:30 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

Add devargs "flow_parser" for rte_flow json parser.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 30 +++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  3 +++
 drivers/net/cpfl/meson.build   |  6 ++++++
 3 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8dbc175749..a2f308fb86 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,7 @@
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
+#define CPFL_FLOW_PARSER	"flow_parser"
 
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
@@ -32,6 +33,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1671,6 +1675,19 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+/* rte_kvargs handler for the "flow_parser" devarg: copy the JSON file
+ * path (value) into the caller-provided buffer (args).
+ * NOTE(review): a path longer than CPFL_FLOW_FILE_LEN-1 is silently
+ * truncated by strlcpy - confirm that is acceptable.
+ */
+static int
+parse_parser_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1719,7 +1736,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_parser_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 5bd6f930b8..cf989a29b3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -87,6 +87,8 @@
 #define ACC_LCE_ID	15
 #define IMC_MBX_EFD_ID	0
 
+#define CPFL_FLOW_FILE_LEN 100
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -100,6 +102,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index fb075c6860..0be25512c3 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,9 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+    ext_deps += js_dep
+endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 2/4] net/cpfl: add flow json parser
  2023-08-11  9:30 [PATCH 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-08-11  9:30 ` Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 4/4] net/cpfl: setup ctrl path Wenjing Qiao
  3 siblings, 0 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11  9:30 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

A JSON file will be used to direct DPDK CPF PMD to
parse rte_flow tokens into low level hardware resources
defined in a DDP package file.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_flow_parser.c | 1758 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  205 ++++
 drivers/net/cpfl/meson.build        |    3 +
 3 files changed, 1966 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..b4635813ff
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1758 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+/* Map a JSON protocol-name string ("eth", "ipv4", ...) to the matching
+ * rte_flow item type.  Logs and returns RTE_FLOW_ITEM_TYPE_VOID when the
+ * name is not supported.
+ */
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	if (strcmp(type, "eth") == 0)
+		return RTE_FLOW_ITEM_TYPE_ETH;
+	else if (strcmp(type, "ipv4") == 0)
+		return RTE_FLOW_ITEM_TYPE_IPV4;
+	else if (strcmp(type, "tcp") == 0)
+		return RTE_FLOW_ITEM_TYPE_TCP;
+	else if (strcmp(type, "udp") == 0)
+		return RTE_FLOW_ITEM_TYPE_UDP;
+	else if (strcmp(type, "vxlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VXLAN;
+	else if (strcmp(type, "icmp") == 0)
+		return RTE_FLOW_ITEM_TYPE_ICMP;
+	else if (strcmp(type, "vlan") == 0)
+		return RTE_FLOW_ITEM_TYPE_VLAN;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+/* Map a JSON action-name string to the matching rte_flow action type.
+ * Only "vxlan_encap" is supported; anything else logs and returns
+ * RTE_FLOW_ACTION_TYPE_VOID.
+ */
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") == 0)
+		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ACTION_TYPE_VOID;
+}
+
+/* Look up member @name in @object and return its string value.
+ * Returns NULL (and logs) when the object or the member is missing.
+ * The returned string is owned by the json_object tree.
+ */
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return NULL;	/* was 'return 0' - use NULL for pointers */
+	}
+	return json_object_get_string(subobject);
+}
+
+/* Look up member @name in @object and store its int value in *@value.
+ * Returns 0 on success, -EINVAL when the object or member is missing.
+ */
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+	return 0;
+}
+
+/* Look up member @name in @object and store its value in *@value.
+ * Returns 0 on success, -EINVAL when the object or member is missing.
+ * NOTE(review): json_object_get_int() returns int; values outside the
+ * uint16_t range are silently truncated here - confirm inputs are bounded.
+ */
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int(subobject);
+	return 0;
+}
+
+/* Look up member @name in @object and store its value in *@value.
+ * Uses json_object_get_int64() so full 32-bit unsigned values survive the
+ * json-c int range; the result is then truncated to uint32_t.
+ * Returns 0 on success, -EINVAL when the object or member is missing.
+ */
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+	*value = json_object_get_int64(subobject);
+	return 0;
+}
+
+/* Parse the pattern-key "attributes" array (e.g. ingress/egress flags)
+ * into js_pr->key.attributes.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_key_attr(json_object *cjson_pr_key_attr, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(cjson_pr_key_attr);
+	/* A single struct is allocated on purpose: every array entry sets a
+	 * field (ingress/egress) of the same struct, not an array element.
+	 */
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr_key_attr, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			return -EINVAL;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			/* NOTE(review): error paths free key.attributes but
+			 * leave the stale pointer in js_pr - confirm callers
+			 * never touch it again after a failure.
+			 */
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse the optional "fields" array of one pattern-key protocol entry into
+ * js_field->fields/fields_size.  String masks are kept for ETH/IPV4
+ * protocols, numeric 32-bit masks for the rest.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	if (cjson_field) {
+		int len, i;
+
+		len = json_object_array_length(cjson_field);
+		js_field->fields_size = len;
+		if (len == 0)
+			return 0;
+		/* Zeroed allocation: the memcpy() calls below copy strlen()
+		 * bytes with no terminator, so the trailing zero bytes are
+		 * what NUL-terminate the stored strings (plain rte_malloc
+		 * memory is uninitialized).
+		 */
+		js_field->fields =
+		    rte_zmalloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+		if (!js_field->fields) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+			return -ENOMEM;
+		}
+		for (i = 0; i < len; i++) {
+			json_object *object;
+			const char *name, *mask;
+
+			object = json_object_array_get_idx(cjson_field, i);
+			name = cpfl_json_object_to_string(object, "name");
+			if (!name) {
+				rte_free(js_field->fields);
+				PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+				return -EINVAL;
+			}
+			/* '>=' (not '>'): a name of exactly
+			 * CPFL_FLOW_JSON_STR_SIZE_MAX bytes would leave no
+			 * room for the NUL terminator.
+			 */
+			if (strlen(name) >= CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				rte_free(js_field->fields);
+				PMD_DRV_LOG(ERR, "The 'name' is too long.");
+				return -EINVAL;
+			}
+			memcpy(js_field->fields[i].name, name, strlen(name));
+
+			if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				mask = cpfl_json_object_to_string(object, "mask");
+				if (!mask) {
+					rte_free(js_field->fields);
+					PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+					return -EINVAL;
+				}
+				/* NOTE(review): 'mask' length is not bounded
+				 * against the destination buffer - confirm the
+				 * field size and add a check like 'name'.
+				 */
+				memcpy(js_field->fields[i].mask, mask, strlen(mask));
+			} else {
+				uint32_t mask_32b;
+				int ret;
+
+				ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+				if (ret < 0) {
+					rte_free(js_field->fields);
+					PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+					return -EINVAL;
+				}
+				js_field->fields[i].mask_32b = mask_32b;
+			}
+		}
+	}
+	return 0;
+}
+
+/* Parse the pattern-key "protocols" array: one entry per protocol layer,
+ * each with a "type" name and optional "fields".
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(cjson_pr_key_proto);
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_key_proto_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(cjson_pr_key_proto, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_pr->key.protocols);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID) {
+			rte_free(js_pr->key.protocols);
+			return -EINVAL;
+		}
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		cjson_pr_key_proto_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0) {
+			/* NOTE(review): 'fields' arrays allocated for earlier
+			 * iterations are not freed here - confirm/plug leak.
+			 */
+			rte_free(js_pr->key.protocols);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* Parse one "protocol"-type field vector: which header (by name), which
+ * occurrence of it (layer), and the 16-bit offset/mask to extract.
+ * Returns 0 on success, -EINVAL on any parse failure.
+ */
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
+	if (ret < 0) {
+		/* was "Can not parse 'value'." - name the key actually read */
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_object_to_string(cjson_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+	js_fv->proto.header = type;	/* stray 'else' after return removed */
+	return 0;
+}
+
+/* Parse the SEM action's "fieldvectors" array into js_act->sem.fv.
+ * Each entry is either an "immediate" 16-bit value or a "protocol"
+ * extraction rule.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_fv);
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *cjson_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_fv, i);
+		js_fv = &js_act->sem.fv[i];
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			return -EINVAL;
+		}
+		js_fv->offset = offset;
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		cjson_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(cjson_value);
+		}  else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			/* Propagate parse failures - the return value was
+			 * previously ignored, leaving js_fv->proto garbage.
+			 */
+			ret = cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
+			if (ret < 0) {
+				rte_free(js_act->sem.fv);
+				PMD_DRV_LOG(ERR, "Can not parse protocol field vector.");
+				return -EINVAL;
+			}
+		} else {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse one pattern action object.  Only the "sem" type is supported:
+ * its "data" carries profile/subprofile/keysize plus field vectors.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(cjson_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *cjson_fv, *cjson_pr_action_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		cjson_pr_action_sem = json_object_object_get(cjson_per_act, "data");
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		cjson_fv = json_object_object_get(cjson_pr_action_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Parse the per-pattern "actions" array into js_pr->actions.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(cjson_pr_act);
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(cjson_pr_act, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse the top-level mandatory "patterns" array of the JSON file into
+ * parser->patterns: for each pattern, its key (protocols + attributes)
+ * and its actions.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_pr;
+	int i, len;
+
+	/* Pattern Rules */
+	cjson_pr = json_object_object_get(json_root, "patterns");
+	if (!cjson_pr) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(cjson_pr);
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_actions, *cjson_pr_key, *cjson_pr_key_proto,
+		    *cjson_pr_key_attr;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr, i);
+		/* pr->key */
+		cjson_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		cjson_pr_key_proto = json_object_object_get(cjson_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(cjson_pr_key_proto, &parser->patterns[i]);
+		if (ret < 0) {
+			/* NOTE(review): on failure, inner allocations of
+			 * earlier patterns are not released before freeing
+			 * the array - confirm/plug leak.
+			 */
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			return -EINVAL;
+		}
+		/* pr->key->attributes */
+		cjson_pr_key_attr = json_object_object_get(cjson_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(cjson_pr_key_attr, &parser->patterns[i]);
+		if (ret < 0) {
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			return -EINVAL;
+		}
+		/* pr->actions */
+		cjson_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(cjson_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse a modification rule's key: an array of rte_flow actions, each
+ * with a "type" name and type-specific "data" (only VXLAN_ENCAP with an
+ * optional protocol list is supported today).
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_mr_key);
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_mr_key_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(cjson_mr_key, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_mr_key->actions);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID) {
+			rte_free(js_mr_key->actions);
+			return -EINVAL;
+		}
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		cjson_mr_key_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *cjson_mr_key_proto;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			cjson_mr_key_proto = json_object_object_get(cjson_mr_key_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!cjson_mr_key_proto) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(cjson_mr_key_proto);
+			/* NOTE(review): proto_size is not bounded against the
+			 * capacity of encap->protocols[] - confirm the array
+			 * size in cpfl_flow_parser.h and reject larger input.
+			 */
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(cjson_mr_key_proto, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					rte_free(js_mr_key->actions);
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					return -EINVAL;
+				}
+				encap->protocols[j] = proto_type;
+			}
+
+		} else {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			/* free the key-action array on this error path too -
+			 * every other failure branch above already does.
+			 */
+			rte_free(js_mr_key->actions);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse a mod action's "layout" array: per-entry index/size/offset plus a
+ * textual "hint".  Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_mr_layout(json_object *cjson_layout, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_layout);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	/* Zeroed allocation: the hint memcpy() below copies strlen() bytes
+	 * with no terminator, so the trailing zero bytes are what
+	 * NUL-terminate the stored hint string.
+	 */
+	js_mod->layout = rte_zmalloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0, ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(cjson_layout, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			return -EINVAL;
+		}
+		/* NOTE(review): hint length is not bounded against the
+		 * destination buffer - confirm the field size and add a check.
+		 */
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+}
+
+/* Parse a modification rule's action.  Only the "mod" type is supported:
+ * its "data" carries a profile id and an optional layout array.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *cjson_mr_action_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(cjson_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+
+	/* mr->action->data */
+	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
+	if (strcmp(type, "mod") == 0) {
+		json_object *layout;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(cjson_mr_action_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		layout = json_object_object_get(cjson_mr_action_data, "layout");
+		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Parse the optional top-level "modifications" array into
+ * parser->modifications: for each rule, its key actions and its action.
+ * Returns 0 on success (including when the section is absent),
+ * negative errno on failure.
+ */
+static int
+cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_mr;
+	int i, len;
+
+	cjson_mr = json_object_object_get(json_root, "modifications");
+	if (!cjson_mr) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	len = json_object_array_length(cjson_mr);
+	parser->mr_size = len;
+	if (len == 0)
+		return 0;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *cjson_mr_key, *cjson_mr_action, *cjson_mr_key_action;
+
+		object = json_object_array_get_idx(cjson_mr, i);
+		/* mr->key */
+		cjson_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		cjson_mr_key_action = json_object_object_get(cjson_mr_key, "actions");
+
+		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			/* NOTE(review): inner allocations of earlier rules are
+			 * not released before freeing the array - confirm.
+			 */
+			rte_free(parser->modifications);
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			return -EINVAL;
+		}
+		/* mr->action */
+		cjson_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			rte_free(parser->modifications);
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Populate @parser from the loaded JSON tree: mandatory pattern rules
+ * first, then optional modification rules.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret = 0;
+
+	ret = cpfl_flow_js_pattern_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+	ret = cpfl_flow_js_mod_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+		return ret;
+	}
+
+	return ret;
+}
+
+/* Load the JSON flow-parser configuration from @filename and build a
+ * parser instance in *@flow_parser.  The JSON tree is released before
+ * returning; the caller owns the parser and frees it with
+ * cpfl_parser_destroy().
+ * Returns 0 on success, negative errno on failure.
+ */
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		/* drop the JSON tree too - it was leaked on this path */
+		json_object_put(root);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	*flow_parser = parser;
+
+	/* json_object_put() returns 1 when the object was freed. */
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release per-pattern-action resources (currently only the SEM field
+ * vector array).
+ */
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	/* rte_free() is a no-op on NULL, so no guard is needed. */
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+/* Free every allocation made while building @parser (pattern protocols,
+ * fields, attributes, actions; modification keys and layouts) and then
+ * the parser itself.  Always returns 0.
+ * Note: the 'if (x) rte_free(x)' guards are redundant - rte_free(NULL)
+ * is a documented no-op - but harmless.
+ */
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		for (j = 0; j < pattern->key.proto_size; j++) {
+			if (pattern->key.protocols[j].fields)
+				rte_free(pattern->key.protocols[j].fields);
+		}
+		if (pattern->key.protocols)
+			rte_free(pattern->key.protocols);
+
+		if (pattern->key.attributes)
+			rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++) {
+			struct cpfl_flow_js_pr_action *pr_act;
+
+			pr_act = &pattern->actions[j];
+			cpfl_parser_free_pr_action(pr_act);
+		}
+
+		if (pattern->actions)
+			rte_free(pattern->actions);
+	}
+	if (parser->patterns)
+		rte_free(parser->patterns);
+
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		if (mr->key.actions)
+			rte_free(mr->key.actions);
+		if (mr->action.type == CPFL_JS_MR_ACTION_TYPE_MOD && mr->action.mod.layout)
+			rte_free(mr->action.mod.layout);
+	}
+	if (parser->modifications)
+		rte_free(parser->modifications);
+
+	rte_free(parser);
+	return 0;
+}
+
+/* Count the entries of an rte_flow item array.  The returned length
+ * includes the terminating RTE_FLOW_ITEM_TYPE_END entry.
+ */
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int length = 0;
+	const struct rte_flow_item *item = items;
+
+	while ((item + length++)->type != RTE_FLOW_ITEM_TYPE_END)
+		continue;
+	return length;
+}
+
+/* Count the entries of an rte_flow action array.  The returned length
+ * includes the terminating RTE_FLOW_ACTION_TYPE_END entry.
+ */
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int length = 0;
+	const struct rte_flow_action *action = actions;
+
+	while ((action + length++)->type != RTE_FLOW_ACTION_TYPE_END)
+		continue;
+	return length;
+}
+
+/* Extract a 16-bit field from the pattern items per a "protocol" field
+ * vector: find the v_layer-th item matching v_header, read 16 bits at
+ * byte offset v_offset of its spec, mask, convert from network byte
+ * order, and store the two bytes at fv[2*offset].  Returns 0 always;
+ * a non-matching rule simply leaves fv untouched.
+ */
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type == v_header) {
+			if (layer == v_layer) {
+				/* copy out 16 bits from offset */
+				const uint8_t *pointer;
+
+				/* NOTE(review): items[j].spec may legally be
+				 * NULL in rte_flow - confirm callers always
+				 * supply a spec before this dereference.
+				 */
+				pointer = &(((const uint8_t *)(items[j].spec))[v_offset]);
+				temp_fv = ntohs((*((const uint16_t *)pointer)) & v_mask);
+				fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+				fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+				break;
+			}
+			layer++;
+		} /* TODO: more type... */
+	}
+	return 0;
+}
+
+/* Materialize @size field vectors into the byte array @fv: "immediate"
+ * entries write a constant 16-bit value (big-endian), "protocol" entries
+ * extract bits from the matched pattern @items.
+ * Returns 0 on success, negative errno on an unsupported entry type.
+ */
+static int
+cpfl_parse_fieldvectors(struct cpfl_flow_js_fv *js_fvs, int size, uint8_t *fv,
+			const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		/* type = int */
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			/* immediate value lives in the high byte pair */
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Translate the JSON pattern actions into a runtime pr_action for the
+ * given flow attributes and matched items.  The first SEM action whose
+ * group rule matches wins and the function returns immediately.
+ * Returns 0 on success, -EPERM for a disallowed group, negative errno
+ * on parse failure.
+ */
+static int
+cpfl_parse_pr_actions(struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		/* NOTE(review): 'group % 10 == 1' selects SEM for groups
+		 * 1, 11, 21, ... while 'group > 4 || group == 0' rejects
+		 * others - these magic group rules deserve named constants;
+		 * confirm the intended group scheme.
+		 */
+		if (attr->group % 10 == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+/* Parse a colon-separated MAC address string (e.g. "ff:ff:ff:ff:ff:ff")
+ * into addr_bytes[RTE_ETHER_ADDR_LEN].
+ * Each group must contain one or two hex digits and exactly six groups
+ * are required. Returns 0 on success, -EINVAL on malformed input.
+ */
+static int
+str2MAC(const char *mask, uint8_t *addr_bytes)
+{
+	int i, size, j;
+	int digits;
+	uint8_t n;
+
+	size = strlen(mask);
+	n = 0;
+	j = 0;
+	digits = 0;
+	for (i = 0; i < size; i++) {
+		char ch = mask[i];
+
+		if (ch == ':') {
+			/* reject empty groups ("::") and too many groups */
+			if (digits == 0 || j >= RTE_ETHER_ADDR_LEN)
+				return -EINVAL;
+			addr_bytes[j++] = n;
+			n = 0;
+			digits = 0;
+		} else if (digits >= 2) {
+			/* a third hex digit would silently overflow the uint8_t */
+			return -EINVAL;
+		} else if (ch >= 'a' && ch <= 'f') {
+			n = n * 16 + ch - 'a' + 10;
+			digits++;
+		} else if (ch >= 'A' && ch <= 'F') {
+			n = n * 16 + ch - 'A' + 10;
+			digits++;
+		} else if (ch >= '0' && ch <= '9') {
+			n = n * 16 + ch - '0';
+			digits++;
+		} else {
+			return -EINVAL;
+		}
+	}
+	/* flush the final group; reject a trailing ':' or empty string */
+	if (digits == 0)
+		return -EINVAL;
+	if (j < RTE_ETHER_ADDR_LEN)
+		addr_bytes[j++] = n;
+
+	if (j != RTE_ETHER_ADDR_LEN)
+		return -EINVAL;
+	return 0;
+}
+
+/* Convert the JSON mask string to bytes and verify it is byte-for-byte
+ * equal to the mask carried in the rte_flow ethernet item.
+ */
+static int
+cpfl_check_eth_mask(const char *mask, const uint8_t addr_bytes[RTE_ETHER_ADDR_LEN])
+{
+	uint8_t expected[RTE_ETHER_ADDR_LEN] = {0};
+	int i;
+
+	if (str2MAC(mask, expected) < 0) {
+		PMD_DRV_LOG(ERR, "string to mac address failed.");
+		return -EINVAL;
+	}
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+		if (expected[i] != addr_bytes[i])
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/* Verify that the dotted-quad mask string from the JSON file equals the
+ * IPv4 mask from the rte_flow item (both network byte order).
+ * Returns 0 on match, -EINVAL on parse failure or mismatch.
+ */
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+
+	/* inet_pton(): 1 = success, 0 = invalid string, -1 = bad family.
+	 * Anything but 1 must be treated as an error, otherwise out_addr
+	 * is read uninitialized when the mask string is invalid.
+	 */
+	int ret = inet_pton(AF_INET, mask, &out_addr);
+
+	if (ret != 1)
+		return -EINVAL;
+
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the ethernet mask of the flow item against the JSON fields.
+ * Every field listed in the JSON must equal the corresponding mask in
+ * the item, and any field not listed must be all-zero in the item mask.
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	static const uint8_t zero_addr_bytes[RTE_ETHER_ADDR_LEN] = {0};
+	int field_size, j;
+	int flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* the JSON field list and the item mask must be present together */
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src.addr_bytes) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst.addr_bytes) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->hdr.ether_type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* Fields not listed in the JSON must be zero in the item mask.
+	 * memcmp compares all six bytes; the previous strcmp() stopped at
+	 * the first NUL and effectively checked only addr_bytes[0].
+	 */
+	if (!flag_src_addr) {
+		if (memcmp(eth_mask->src.addr_bytes, zero_addr_bytes, RTE_ETHER_ADDR_LEN) != 0)
+			return -EINVAL;
+	}
+	if (!flag_dst_addr) {
+		if (memcmp(eth_mask->dst.addr_bytes, zero_addr_bytes, RTE_ETHER_ADDR_LEN) != 0)
+			return -EINVAL;
+	}
+	if (!flag_ether_type) {
+		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Validate the IPv4 mask of the flow item against the JSON fields:
+ * every listed field must equal the item mask and any field not listed
+ * must be zero in the item mask.
+ */
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	struct cpfl_flow_js_pr_key_proto_field *field;
+	bool has_src = false, has_dst = false, has_proto_id = false;
+	int nb_fields, i;
+
+	if (!proto)
+		return 0;
+
+	/* the JSON field list and the item mask must be present together */
+	nb_fields = proto->fields_size;
+	if ((nb_fields != 0) != (ipv4_mask != NULL))
+		return -EINVAL;
+	if (nb_fields == 0)
+		return 0;
+
+	for (i = 0; i < nb_fields; i++) {
+		field = &proto->fields[i];
+		if (strcmp(field->name, "src_addr") == 0) {
+			/* match: rte_flow_item->mask */
+			if (cpfl_check_ipv4_mask(field->mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			has_src = true;
+		} else if (strcmp(field->name, "dst_addr") == 0) {
+			if (cpfl_check_ipv4_mask(field->mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			has_dst = true;
+		} else if (strcmp(field->name, "next_proto_id") == 0) {
+			if ((uint8_t)field->mask_32b != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			has_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* unlisted fields must be zero in the item mask */
+	if (!has_src && ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+		return -EINVAL;
+	if (!has_dst && ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+		return -EINVAL;
+	if (!has_proto_id && ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the TCP mask of the flow item against the JSON fields:
+ * every listed field must equal the item mask and any field not listed
+ * must be zero in the item mask.
+ */
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	struct cpfl_flow_js_pr_key_proto_field *field;
+	bool has_src_port = false, has_dst_port = false;
+	int nb_fields, i;
+
+	if (!proto)
+		return 0;
+
+	/* the JSON field list and the item mask must be present together */
+	nb_fields = proto->fields_size;
+	if ((nb_fields != 0) != (tcp_mask != NULL))
+		return -EINVAL;
+	if (nb_fields == 0)
+		return 0;
+
+	for (i = 0; i < nb_fields; i++) {
+		uint16_t mask;
+
+		field = &proto->fields[i];
+		/* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(field->name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			has_src_port = true;
+		} else if (strcmp(field->name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			has_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* unlisted fields must be zero in the item mask */
+	if (!has_src_port && tcp_mask->hdr.src_port != (rte_be16_t)0)
+		return -EINVAL;
+	if (!has_dst_port && tcp_mask->hdr.dst_port != (rte_be16_t)0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the UDP mask of the flow item against the JSON fields:
+ * every listed field must equal the item mask and any field not listed
+ * must be zero in the item mask.
+ */
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	struct cpfl_flow_js_pr_key_proto_field *field;
+	bool has_src_port = false, has_dst_port = false;
+	int nb_fields, i;
+
+	if (!proto)
+		return 0;
+
+	/* the JSON field list and the item mask must be present together */
+	nb_fields = proto->fields_size;
+	if ((nb_fields != 0) != (udp_mask != NULL))
+		return -EINVAL;
+	if (nb_fields == 0)
+		return 0;
+
+	for (i = 0; i < nb_fields; i++) {
+		uint16_t mask;
+
+		field = &proto->fields[i];
+		/* match: rte_flow_item->mask */
+		mask = (uint16_t)field->mask_32b;
+		if (strcmp(field->name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask)
+				return -EINVAL;
+			has_src_port = true;
+		} else if (strcmp(field->name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask)
+				return -EINVAL;
+			has_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* unlisted fields must be zero in the item mask */
+	if (!has_src_port && udp_mask->hdr.src_port != (rte_be16_t)0)
+		return -EINVAL;
+	if (!has_dst_port && udp_mask->hdr.dst_port != (rte_be16_t)0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the VXLAN mask of the flow item against the JSON fields.
+ * Only the "vx_vni" field is supported.
+ */
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	if (proto) {
+		int field_size, j;
+		struct cpfl_flow_js_pr_key_proto_field *field;
+
+		/* the JSON field list and the item mask must be present together */
+		field_size = proto->fields_size;
+		if (field_size != 0 && !vxlan_mask)
+			return -EINVAL;
+
+		if (field_size == 0 && vxlan_mask)
+			return -EINVAL;
+
+		if (field_size == 0 && !vxlan_mask)
+			return 0;
+
+		for (j = 0; j < field_size; j++) {
+			const char *name;
+			int64_t mask;
+
+			field = &proto->fields[j];
+			name = field->name;
+			/* match: rte_flow_item->mask */
+			mask = (int64_t)field->mask_32b;
+			if (strcmp(name, "vx_vni") == 0) {
+				/* NOTE(review): RTE_BE32() converts CPU->BE; applying it
+				 * to the already big-endian vx_vni yields host order only
+				 * on little-endian CPUs — confirm intended on BE targets.
+				 */
+				if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask)
+					return -EINVAL;
+			} else {
+				PMD_DRV_LOG(ERR, "not support this name.");
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
+}
+
+/* ICMP carries no per-field masks in the JSON schema; only verify that
+ * the JSON protocol entry and the item mask are present (or absent)
+ * together.
+ */
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	if (!proto)
+		return 0;
+
+	if ((proto->fields_size != 0) != (icmp_mask != NULL))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Walk the JSON protocol list and the rte_flow pattern in lockstep and
+ * dispatch each pair to the per-protocol mask checker.
+ *
+ * A single index now serves both arrays: the original kept two indices
+ * that only advanced together by coincidence and read the item mask
+ * through the other one, which would break silently if either loop ever
+ * skipped an entry.
+ *
+ * Returns 0 on success, -EINVAL when the lists do not line up,
+ * -EPERM on an unsupported protocol type.
+ */
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length;
+
+	length = cpfl_get_items_length(items);
+	if (proto_size > length - 1)
+		return -EINVAL;
+
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto = &protocols[i];
+		enum rte_flow_item_type type = key_proto->type;
+		int ret = 0;
+
+		/* reject unsupported protocol types first (-EPERM, as before) */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		case RTE_FLOW_ITEM_TYPE_TCP:
+		case RTE_FLOW_ITEM_TYPE_UDP:
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+		/* the i-th pattern item must be of the required type */
+		if (items[i].type != type)
+			return -EINVAL;
+
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			ret = cpfl_check_eth(key_proto,
+					     (const struct rte_flow_item_eth *)items[i].mask);
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ret = cpfl_check_ipv4(key_proto,
+					      (const struct rte_flow_item_ipv4 *)items[i].mask);
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			ret = cpfl_check_tcp(key_proto,
+					     (const struct rte_flow_item_tcp *)items[i].mask);
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			ret = cpfl_check_udp(key_proto,
+					     (const struct rte_flow_item_udp *)items[i].mask);
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			ret = cpfl_check_vxlan(key_proto,
+					       (const struct rte_flow_item_vxlan *)items[i].mask);
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			ret = cpfl_check_icmp(key_proto,
+					      (const struct rte_flow_item_icmp *)items[i].mask);
+			break;
+		default:
+			break;
+		}
+		if (ret < 0)
+			return ret;
+	}
+	/* all JSON protocols consumed: the pattern must end right here */
+	if (items[proto_size].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Match the pattern-rule attributes against struct rte_flow_attr
+ * (ingress/egress must both agree).
+ */
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	if (key_attr->ingress != attr->ingress)
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+	else if (key_attr->egress != attr->egress)
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+	else
+		return 0;
+
+	return -EINVAL;
+}
+
+/* Check one JSON pattern rule's key — the protocol list and the
+ * attributes — against the given rte_flow pattern and attributes.
+ */
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	/* pr->key->protocols */
+	if (cpfl_check_pattern_key_proto(pattern->key.protocols,
+					 pattern->key.proto_size, items) < 0)
+		return -EINVAL;
+
+	/* pr->key->attributes */
+	if (cpfl_check_pattern_key_attr(pattern->key.attributes, attr) < 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Find the first JSON pattern rule whose key matches the given pattern
+ * and attributes, then translate its actions into pr_action.
+ * output: struct cpfl_flow_pr_action *pr_action
+ * Returns -EINVAL when no rule matches.
+ */
+static int
+cpfl_parse_pattern_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+			 const struct rte_flow_attr *attr,
+			 struct cpfl_flow_pr_action *pr_action)
+{
+	int idx;
+
+	for (idx = 0; idx < parser->pr_size; idx++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[idx];
+
+		if (cpfl_check_pattern_key(pattern, items, attr) < 0)
+			continue;
+		/* first matching rule wins */
+		return cpfl_parse_pr_actions(pattern->actions, pattern->actions_size,
+					     items, attr, pr_action);
+	}
+	return -EINVAL;
+}
+
+/* Public entry point: translate rte_flow pattern items plus attributes
+ * into a pattern-rule action using the JSON-derived parser.
+ */
+int
+cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	return cpfl_parse_pattern_rules(parser, items, attr, pr_action);
+}
+
+/* modifications rules */
+/* Check that the item "definition" of a vxlan_encap rte_flow action
+ * matches the protocol sequence required by the JSON modification key.
+ * A VLAN entry in the JSON sequence must be a VOID item in the action
+ * definition (presumably meaning the VLAN header is optional/absent —
+ * confirm intended semantics).
+ * NOTE(review): assumes action->conf is non-NULL and "definition" is a
+ * valid END-terminated item array — confirm caller validation.
+ */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	/* def_length - 1: exclude the END item */
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* output: struct cpfl_flow_mr_key_action *mr_key_action */
+/* check and parse */
+/* Match the JSON modification-key actions against the rte_flow action
+ * list and record each matched action in mr_key_action[i].
+ * Only VXLAN_ENCAP key actions are supported.
+ * Returns 0 on success, -EINVAL on mismatch, -EPERM on an unsupported
+ * key action type.
+ */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	if (size > actions_length - 1)
+		return -EINVAL;
+
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		/* match: <type> action matches RTE_FLOW_ACTION_TYPE_<type> */
+
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			/* j only moves forward: key actions must appear in the
+			 * rte_flow action list in the same relative order as in
+			 * the JSON key.
+			 */
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			/* copy the expected protocol sequence from the JSON key */
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* Serialize the layout entries into "buffer".
+ *
+ * Each entry copies "size" bytes taken at "offset" inside the protocol
+ * header named by "hint" (looked up in the vxlan_encap action definition
+ * referenced through mr_key_action[index]) to the running position
+ * "start" in the buffer. index == -1 reserves "size" bytes of padding
+ * without writing.
+ *
+ * output: uint8_t *buffer, uint16_t *byte_len (total bytes laid out)
+ * Returns 0 on success, -EINVAL on failure (*byte_len is set to 0).
+ */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i, start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			/* dummy entry: just reserve the bytes */
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		addr = NULL;
+		temp = mr_key_action + index;
+
+		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+			struct rte_flow_item *definition;
+			int def_length, k;
+
+			action_vxlan_encap =
+			    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+			definition = action_vxlan_encap->definition;
+			def_length = cpfl_get_items_length(definition);
+			for (k = 0; k < def_length - 1; k++) {
+				if ((strcmp(hint, "eth") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+				    (strcmp(hint, "ipv4") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+				    (strcmp(hint, "udp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+				    (strcmp(hint, "tcp") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+				    (strcmp(hint, "vxlan") == 0 &&
+				     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+					addr = (const uint8_t *)(definition[k].spec);
+					/* The whole copy must fit in the 256-byte
+					 * destination (struct cpfl_flow_mr_action_mod
+					 * data[256]); checking only "start > 255"
+					 * still allowed writing past the end.
+					 */
+					if (start + size > 256) {
+						*byte_len = 0;
+						PMD_DRV_LOG(ERR, "byte length is too long: %s",
+							    hint);
+						return -EINVAL;
+					}
+					memcpy(buffer + start, addr + offset, size);
+					break;
+				} /* TODO: more hint... */
+			}
+			if (k == def_length - 1) {
+				*byte_len = 0;
+				PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+				return -EINVAL;
+			}
+		} else {
+			/* TODO: more action types */
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		}
+
+		start += size;
+	}
+	*byte_len = start;
+	return 0;
+}
+
+/* Translate one JSON modification action into mr_action.
+ * Only the "mod" action type is supported; its optional layout is
+ * serialized into mr_action->mod.data.
+ */
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	switch (action->type) {
+	case CPFL_JS_MR_ACTION_TYPE_MOD:
+		mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		mr_action->mod.byte_len = 0;
+		mr_action->mod.prof = action->mod.prof;
+		/* the layout is optional */
+		if (action->mod.layout) {
+			memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+			if (cpfl_parse_layout(action->mod.layout, action->mod.layout_size,
+					      mr_key_action, mr_action->mod.data,
+					      &mr_action->mod.byte_len) < 0)
+				return -EINVAL;
+		}
+		return 0;
+	default:
+		PMD_DRV_LOG(ERR, "Not support this type: %d.", action->type);
+		return -EINVAL;
+	}
+}
+
+/* Match one modification rule's key actions against the rte_flow action
+ * list, filling mr_key_action on success.
+ */
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	return cpfl_parse_mr_key_action(mr->key.actions, mr->key.actions_size,
+					actions, mr_key_action);
+}
+
+/* Find the first JSON modification rule whose key matches the rte_flow
+ * action list and translate it into mr_action.
+ * output: struct cpfl_flow_mr_action *mr_action
+ * Returns -EINVAL when no rule matches.
+ */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+#define CPFL_MOD_KEY_NUM_MAX 8
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+	int idx;
+
+	for (idx = 0; idx < parser->mr_size; idx++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[idx];
+
+		if (cpfl_check_mod_key(mr, actions, mr_key_action) < 0)
+			continue;
+		/* mr->action */
+		if (cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action) == 0)
+			return 0;
+	}
+	return -EINVAL;
+}
+
+/* Public entry point: translate rte_flow actions into a modification
+ * rule action. Modification rules are optional in the JSON file, so
+ * their absence is not an error.
+ */
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	if (parser->modifications == NULL) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..af33051ce2
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+/* the include guard comes first so the headers below are protected from
+ * repeated expansion on multiple inclusion
+ */
+#include <json-c/json.h>
+#include <rte_flow.h>
+
+/* maximum length of any string copied out of the JSON file (field
+ * names, masks, hints)
+ */
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+
+/* Pattern Rules Storage Begin */
+/* pattern-rule action kinds parsed from the JSON file */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* attribute part of a pattern-rule key, matched against rte_flow_attr */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+/* one protocol field constraint: a named field plus its mask, stored
+ * either as a string (e.g. MAC/IPv4 address masks) or a 32-bit integer
+ */
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* one protocol entry of a pattern-rule key: item type + field masks */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+/* how one field-vector word is produced */
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+
+};
+
+/* one 16-bit word of the SEM field-vector key: either an immediate
+ * value or 16 bits extracted from a protocol header of the pattern
+ */
+struct cpfl_flow_js_fv {
+	uint16_t offset;	/* destination word index in the key */
+	enum cpfl_flow_js_fv_type type;
+	union {
+		uint16_t immediate;
+		struct {
+			uint16_t layer;	/* n-th occurrence of the header */
+			enum rte_flow_item_type header;
+			uint16_t offset;	/* byte offset inside the header spec */
+			uint16_t mask;
+		} proto;
+	};
+};
+
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+/* SEM pattern-rule action as read from the JSON file */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	struct cpfl_flow_js_fv *fv;
+	int fv_size;
+};
+
+/* tagged union over all supported pattern-rule action kinds */
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/* one pattern rule from the JSON file: a key (protocols + attributes)
+ * plus the actions applied when the key matches
+ */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+/* Pattern Rules Storage End */
+
+/* Modification Rules Storage Begin */
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+/* protocol sequence expected inside a vxlan_encap action definition */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* one action of a modification-rule key (only vxlan_encap supported) */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+/* key of a modification rule: the list of actions it must match */
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+/* one layout entry: copy "size" bytes at "offset" within the header
+ * named by "hint" from the key action at "index" (-1 = padding)
+ */
+struct cpfl_flow_js_mr_layout {
+	int index;
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	uint16_t offset;
+	uint16_t size;
+};
+
+/* "mod" modification action parameters as read from the JSON file */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+/* tagged union over all supported modification action kinds */
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/* one modification rule: key (actions to match) + action to produce */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
+/* Modification Rules Storage End */
+
+/* in-memory representation of the whole parsed JSON flow config file */
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
+};
+
+/* Pattern Rules Begin */
+/* resolved SEM action produced by cpfl_flow_parse_items() */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+/* tagged union over resolved pattern-rule actions */
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+/* Pattern Rules End */
+
+/* Modification Rules Begin */
+/* matched vxlan_encap key action: the expected protocol sequence plus a
+ * pointer back to the matched rte_flow action
+ */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+/* one matched modification-key action */
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+/* resolved "mod" action: byte_len valid bytes serialized into data[]
+ * by cpfl_parse_layout()
+ */
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	uint8_t data[256];
+};
+
+/* tagged union over resolved modification actions */
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
+/* Modification Rules End */
+
+struct cpfl_pipeline_stage {
+	int stage;
+	int recircle;
+};
+
+/* Build a parser from the JSON flow configuration file at "filename". */
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+/* Free a parser created by cpfl_parser_create(). */
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+/* Translate pattern items + attributes into a pattern-rule action. */
+int cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+/* Translate actions into a modification-rule action (optional rules). */
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 0be25512c3..7b8d043011 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,9 @@ endif
 
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
+    sources += files(
+        'cpfl_flow_parser.c',
+    )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
 endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 3/4] net/cpfl: introduce CPF common library
  2023-08-11  9:30 [PATCH 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
  2023-08-11  9:30 ` [PATCH 2/4] net/cpfl: add flow json parser Wenjing Qiao
@ 2023-08-11  9:30 ` Wenjing Qiao
  2023-08-24  9:19   ` Xing, Beilei
  2023-08-11  9:30 ` [PATCH 4/4] net/cpfl: setup ctrl path Wenjing Qiao
  3 siblings, 1 reply; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11  9:30 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

Add common library support for CPFL rte_flow to
create/delete rules.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 380 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/meson.build     |   2 +
 6 files changed, 1723 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..1481c911fe
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+union cpfl_action_set {
+	uint32_t data;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set act;
+
+	act.data = 0;
+	return act;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	return act->data == cpfl_act_nop().data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently uses only INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base action
+ * when the HAS has it finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structure with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of the ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META,
+				  CPFL_ACT_8B_MOD_META_VALID | prof);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT, pe, port);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and requires an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+				     mod_addr);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint to whether
+ * or not an accompanied MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(u8 slot, u8 prec, u32 idx, u8 bank)
+{
+	union cpfl_action_set a;
+	u32 val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_METER_VALID |
+		(u32)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(u32)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of 2 chained
+ * action sets.  The chained action set is the first.  The base/parent action
+ * sets is the second.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..97a6bdd042
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that DMA parameter of each DMA memory struct is present and
+ * consistent with control queue parameters.
+ *
+ * Return: 0 on success; -EBADR when a required DMA region is missing or its
+ * size is inconsistent with the queue type; -EINVAL when the ring size does
+ * not match the queue length.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  CP allocates one big chunk of DMA
+		 * region whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down to multiple
+		 * smaller blocks that actually gets programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			cq->bi.rx_buff[i]->va =
+				(uint64_t *)((char *)qinfo->buf_mem.va +
+					(i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		/* TX-side queues have no receive buffers to track */
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	/* i indexes the element whose allocation failed (NULL); free the
+	 * successfully allocated entries before it.
+	 */
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	/* Fix: free the rx_buff pointer array itself.  The original code
+	 * guarded this with "if (!cq->bi.rx_buff)", which only ever freed a
+	 * NULL pointer and leaked the array on this error path.
+	 */
+	idpf_free(hw, cq->bi.rx_buff);
+	cq->bi.rx_buff = NULL;
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		/* Arm the descriptor with the buffer's address/length; the
+		 * BUF|RD flags hand it to hardware for writing.
+		 */
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ *
+ * Copies the register offsets/masks supplied by the caller into the control
+ * queue's local register description; no hardware access happens here.
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	/* set control queue registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL need to be set.
+	 * q_id == -1 identifies the default mailbox queue (see
+	 * cpfl_ctlq_create_info.id); all other queues return here.
+	 */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself; only the local bookkeeping
+ * structures (rx buffer descriptors or the tx message array) are freed here.
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Return: 0 on success; -EINVAL on bad parameters or unsupported queue type;
+ * -ENOMEM on allocation failure.  On failure nothing is left allocated and
+ * *cq_out is not written.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	idpf_init_lock(&cq->cq_lock);
+
+	/* Publish the queue on the adapter-wide control queue list */
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_vport_ctlq_add - add a control queue for a vport
+ *
+ * Thin wrapper around cpfl_ctlq_add(); see that function for semantics.
+ */
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue
+ *
+ * Frees the ring resources under the queue lock and marks the queue
+ * uninitialized (ring_size == 0); the lock is destroyed afterwards.
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* ring_size == 0 means the queue was never (fully) initialized */
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ *
+ * Unlinks the queue from the adapter list, shuts it down and frees it.
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+/**
+ * cpfl_vport_ctlq_remove - remove a vport control queue
+ *
+ * Thin wrapper around cpfl_ctlq_remove(); see that function for semantics.
+ */
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size; /* size of ext_info (presumably bytes) -- TODO confirm */
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get bit common context for descriptor
+ * @cmn_cfg: common rule config data shared by all rule opcodes
+ *
+ * Packs the fields common to every SEM/MOD rule opcode into the 64-bit
+ * descriptor context.  Unknown opcodes yield an empty (zero) context.
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		/* All listed opcodes share the same common field layout */
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get bit context for descriptor
+ * @cfg_data: full rule config data (common + opcode-specific extension)
+ *
+ * Builds the 64-bit descriptor context: common fields first, then the
+ * opcode-specific extension fields (MOD content or query/delete address).
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ * @cfg_data: rule config data filled in by the caller
+ * @ctlq_msg: (output) control queue message to be sent to HW
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	/* Descriptor context is little-endian on the wire */
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * @key: match key bytes (copied verbatim into the blob)
+ * @key_byte_len: length of @key in bytes
+ * @act_bytes: action words to store (converted to little endian)
+ * @act_byte_len: length of @act_bytes in bytes; copied in whole dwords only
+ * @cfg_ctrl: 16-bit CFG_CTRL word, see CPFL_GET_MEV_SEM_RULE_CFG_CTRL()
+ * @rule_blob: (output) zeroed and filled SEM rule record
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	/* NOTE(review): act_bytes and the cfg_ctrl member are accessed via
+	 * uint32_t/uint16_t casts -- assumes both are suitably aligned
+	 * (DMA buffers normally are); confirm for all callers.
+	 */
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	/* Actions are stored as little-endian dwords */
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..317504dabe
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table*/
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry Not exists */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameter.
+ */
+ /* build SEM CFG_CTRL*/
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL*/
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format, the rest of members
+ * are in little endian
+ *
+ * Member sizes total 256 bytes (128 + 72 + 2 + 8 + 46), matching the
+ * pkt_data view in union cpfl_rule_cfg_pkt_record.
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ *
+ * All three views alias the same 256-byte rule payload buffer.
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ *
+ * Same layout as cpfl_rule_query_addr; kept separate per opcode family.
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ *
+ * Fields are packed into the descriptor context by
+ * cpfl_prep_rule_desc_ctx() using the MEV_RULE_MOD_* masks.
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len; /* length of the DMA payload in bytes */
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ *
+ * Which member of @ext is valid depends on common.opc (see
+ * cpfl_prep_rule_desc_ctx()).
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ * @mod_obj_size: MOD object size selector
+ * @pin_mod_content: pin the MOD content in cache when set
+ * @mod_index: MOD table index
+ * @mod_content: (output) structure to fill
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	mod_content->obj_size = mod_obj_size;
+	mod_content->pin_content = pin_mod_content;
+	mod_content->index = mod_index;
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * note: call this function before calls cpfl_prep_rule_desc()
+ *
+ * Straight field copy into @cfg_cmn; ret_val is always reset to 0.
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+	cfg_cmn->host_id = host_id;
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 7b8d043011..84ba994469 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,6 +43,8 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow_parser.c',
+        'cpfl_rules.c',
+        'cpfl_controlq.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH 4/4] net/cpfl: setup ctrl path
  2023-08-11  9:30 [PATCH 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
                   ` (2 preceding siblings ...)
  2023-08-11  9:30 ` [PATCH 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
@ 2023-08-11  9:30 ` Wenjing Qiao
  2023-08-24  9:15   ` Xing, Beilei
  3 siblings, 1 reply; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11  9:30 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao, Qi Zhang

Set up the control vport and the control queues used for flow offloading.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 270 ++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  14 ++
 drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
 3 files changed, 425 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2f308fb86..34b7c22ee1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1783,9 +1783,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
-	/* ignore if it is exceptional vport */
-	if (adapter->exceptional_vport &&
-	    adapter->exceptional_vport->base.vport_id == vc_event->vport_id)
+	/* ignore if it is ctrl vport or exceptional vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id ||
+	    (adapter->exceptional_vport &&
+	     adapter->exceptional_vport->base.vport_id == vc_event->vport_id))
 		return;
 
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
@@ -1983,6 +1984,260 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+/* Disable all Tx then all Rx config queues on the control vport.
+ * In the idpf_vc_queue_switch() calls below, the third argument selects
+ * Rx (true) vs Tx (false) and the last argument enables (true) or
+ * disables (false) the queue.
+ */
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Configure then enable the control vport's config queues: first push the
+ * Tx/Rx queue configuration to the CP, then switch each queue on.
+ * Returns 0 on success or the first error encountered (no rollback here;
+ * the caller unwinds via cpfl_stop_cfgqs()).
+ */
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Remove all config control queues and free the DMA ring/buffer memory
+ * that cpfl_cfgq_setup() allocated for them.
+ */
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+/* Register every config queue described in adapter->cfgq_info with the
+ * control queue layer and record the resulting handles in adapter->ctlqp.
+ * On any failure, all queues added so far are removed again.
+ */
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+/* Fill adapter->cfgq_info and allocate DMA memory for the config queues.
+ * Even indexes describe Tx config queues, odd indexes Rx config queues;
+ * index i maps to hardware queue (i / 2) of the respective direction.
+ * Only Rx queues get a buffer region; every queue gets a descriptor ring.
+ * On failure all memory allocated so far is released.
+ */
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+/* Initialize the control vport from the CREATE_VPORT response stored in
+ * adapter->ctrl_vport_recv_info: record the vport id and the Tx/Rx queue
+ * chunk info (start queue id, tail register start and spacing) that
+ * cpfl_cfgq_setup() later uses to address the config queues.
+ */
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Tear down the control path: disable the config queues first, then remove
+ * the control queue structures and free their DMA memory, and finally
+ * destroy the control vport.
+ *
+ * Fix: the original order removed (and freed) the config queues BEFORE
+ * stopping them, i.e. it disabled queues whose rings were already gone.
+ * This now mirrors the unwind order of cpfl_ctrl_path_open()
+ * (err_start_cfgqs: stop, then remove).
+ */
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+/* Bring up the control path: create and initialize the control vport, then
+ * allocate, register and start the config queues.  Each error label unwinds
+ * exactly the steps completed before the failure, in reverse order.
+ */
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2150,6 +2405,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2157,6 +2418,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_whitelist_uninit(adapter);
@@ -2450,6 +2713,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index cf989a29b3..2e9480ffc1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -89,6 +90,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -216,10 +221,19 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vport_identity,
 			   struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+/* Ask the backend (via VIRTCHNL2_OP_CREATE_VPORT) for a default vport with
+ * CPFL_TX_CFGQ_NUM single-model TX queues and CPFL_RX_CFGQ_NUM single-model
+ * RX queues, no completion/buffer queues. The full mailbox response is
+ * stashed in adapter->ctrl_vport_recv_info for cpfl_init_ctrl_vport().
+ * Returns 0 on success, the idpf_vc_cmd_execute() error code otherwise.
+ */
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	/* Keep a copy of the whole response buffer; parsed later. */
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+/* Configure the control-path RX queues via VIRTCHNL2_OP_CONFIG_RX_QUEUES.
+ * Builds one virtchnl2_rxq_info per config RX queue from the previously
+ * created control queues and sends the message to the backend.
+ * Returns 0 on success, negative error code otherwise.
+ */
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_RX_CFGQ_NUM;
+	/* The message struct already embeds one qinfo entry, hence num_qs - 1. */
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	/* Index 2*i+1: odd ctlqp/cfgq_info slots hold the RX config queues
+	 * (even slots are TX, see cpfl_config_ctlq_tx()).
+	 */
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+/* Configure the control-path TX queues via VIRTCHNL2_OP_CONFIG_TX_QUEUES.
+ * Mirror of cpfl_config_ctlq_rx() for the TX side.
+ * Returns 0 on success, negative error code otherwise.
+ */
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	num_qs = CPFL_TX_CFGQ_NUM;
+	/* The message struct already embeds one qinfo entry, hence num_qs - 1. */
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	/* Index 2*i: even ctlqp/cfgq_info slots hold the TX config queues
+	 * (odd slots are RX, see cpfl_config_ctlq_rx()).
+	 */
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 0/4] net/cpfl: add basic support for rte_flow
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-08-11 10:00   ` Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11 10:00 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

This patchset adds basic support for rte_flow.
---
Depends-on: series-29139 ("net/cpfl: support port representor")

Wenjing Qiao (4):
  net/cpfl: parse flow parser file in devargs
  net/cpfl: add flow json parser
  net/cpfl: introduce CPF common library
  net/cpfl: setup ctrl path

 drivers/net/cpfl/cpfl_actions.h     |  858 +++++++++++++
 drivers/net/cpfl/cpfl_controlq.c    |  379 ++++++
 drivers/net/cpfl/cpfl_controlq.h    |   51 +
 drivers/net/cpfl/cpfl_ethdev.c      |  300 ++++-
 drivers/net/cpfl/cpfl_ethdev.h      |   17 +
 drivers/net/cpfl/cpfl_flow_parser.c | 1758 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  205 ++++
 drivers/net/cpfl/cpfl_rules.c       |  126 ++
 drivers/net/cpfl/cpfl_rules.h       |  306 +++++
 drivers/net/cpfl/cpfl_vchnl.c       |  144 +++
 drivers/net/cpfl/meson.build        |   11 +
 11 files changed, 4151 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
@ 2023-08-11 10:00   ` Wenjing Qiao
  2023-08-24  3:15     ` Xing, Beilei
  2023-08-11 10:00   ` [PATCH v2 2/4] net/cpfl: add flow json parser Wenjing Qiao
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11 10:00 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

Add devargs "flow_parser" for rte_flow json parser.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
Depends-on: series-29139 ("net/cpfl: support port representor")
---
 drivers/net/cpfl/cpfl_ethdev.c | 30 +++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  3 +++
 drivers/net/cpfl/meson.build   |  6 ++++++
 3 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8dbc175749..a2f308fb86 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -21,6 +21,7 @@
 #define CPFL_TX_SINGLE_Q	"tx_single"
 #define CPFL_RX_SINGLE_Q	"rx_single"
 #define CPFL_VPORT		"vport"
+#define CPFL_FLOW_PARSER	"flow_parser"
 
 rte_spinlock_t cpfl_adapter_lock;
 /* A list for all adapters, one adapter matches one PCI device */
@@ -32,6 +33,9 @@ static const char * const cpfl_valid_args_first[] = {
 	CPFL_TX_SINGLE_Q,
 	CPFL_RX_SINGLE_Q,
 	CPFL_VPORT,
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	CPFL_FLOW_PARSER,
+#endif
 	NULL
 };
 
@@ -1671,6 +1675,19 @@ parse_repr(const char *key __rte_unused, const char *value, void *args)
 	return 0;
 }
 
+#ifdef CPFL_FLOW_JSON_SUPPORT
+/* rte_kvargs callback for the "flow_parser" devarg: copies the JSON file
+ * path ('value') into the caller-provided buffer ('args'), truncating to
+ * CPFL_FLOW_FILE_LEN via strlcpy. Always returns 0.
+ */
+static int
+parse_parser_file(const char *key, const char *value, void *args)
+{
+	char *name = args;
+
+	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);
+	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
+
+	return 0;
+}
+#endif
+
 static int
 cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter, bool first)
 {
@@ -1719,7 +1736,18 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
 				 &adapter->base.is_rx_singleq);
 	if (ret != 0)
 		goto fail;
-
+#ifdef CPFL_FLOW_JSON_SUPPORT
+	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
+		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
+					 &parse_parser_file, cpfl_args->flow_parser);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to parser flow_parser, ret: %d", ret);
+			goto fail;
+		}
+	} else {
+		cpfl_args->flow_parser[0] = '\0';
+	}
+#endif
 fail:
 	rte_kvargs_free(kvlist);
 	return ret;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 5bd6f930b8..cf989a29b3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -87,6 +87,8 @@
 #define ACC_LCE_ID	15
 #define IMC_MBX_EFD_ID	0
 
+#define CPFL_FLOW_FILE_LEN 100
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -100,6 +102,7 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 	uint8_t repr_args_num;
 	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
+	char flow_parser[CPFL_FLOW_FILE_LEN];
 };
 
 struct p2p_queue_chunks_info {
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index fb075c6860..0be25512c3 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -38,3 +38,9 @@ if arch_subdir == 'x86'
         cflags += ['-DCC_AVX512_SUPPORT']
     endif
 endif
+
+js_dep = dependency('json-c', required: false, method : 'pkg-config')
+if js_dep.found()
+    dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
+    ext_deps += js_dep
+endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 2/4] net/cpfl: add flow json parser
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-08-11 10:00   ` Wenjing Qiao
  2023-08-24  7:25     ` Xing, Beilei
  2023-08-11 10:00   ` [PATCH v2 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 4/4] net/cpfl: setup ctrl path Wenjing Qiao
  4 siblings, 1 reply; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11 10:00 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

A JSON file will be used to direct the DPDK CPF PMD to
parse rte_flow tokens into low-level hardware resources
defined in a DDP package file.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
Depends-on: series-29139 ("net/cpfl: support port representor")
---
 drivers/net/cpfl/cpfl_flow_parser.c | 1758 +++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_flow_parser.h |  205 ++++
 drivers/net/cpfl/meson.build        |    3 +
 3 files changed, 1966 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
 create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h

diff --git a/drivers/net/cpfl/cpfl_flow_parser.c b/drivers/net/cpfl/cpfl_flow_parser.c
new file mode 100644
index 0000000000..b4635813ff
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.c
@@ -0,0 +1,1758 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <arpa/inet.h>
+#include <asm-generic/errno-base.h>
+#include <stdint.h>
+
+#include "cpfl_flow_parser.h"
+#include "cpfl_ethdev.h"
+#include "rte_malloc.h"
+
+/* Map a JSON protocol-name string to its rte_flow item type.
+ * Unknown names are logged and mapped to RTE_FLOW_ITEM_TYPE_VOID.
+ */
+static enum rte_flow_item_type
+cpfl_get_item_type_by_str(const char *type)
+{
+	static const struct {
+		const char *name;
+		enum rte_flow_item_type item;
+	} item_map[] = {
+		{ "eth",   RTE_FLOW_ITEM_TYPE_ETH },
+		{ "ipv4",  RTE_FLOW_ITEM_TYPE_IPV4 },
+		{ "tcp",   RTE_FLOW_ITEM_TYPE_TCP },
+		{ "udp",   RTE_FLOW_ITEM_TYPE_UDP },
+		{ "vxlan", RTE_FLOW_ITEM_TYPE_VXLAN },
+		{ "icmp",  RTE_FLOW_ITEM_TYPE_ICMP },
+		{ "vlan",  RTE_FLOW_ITEM_TYPE_VLAN },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(item_map) / sizeof(item_map[0]); i++) {
+		if (strcmp(type, item_map[i].name) == 0)
+			return item_map[i].item;
+	}
+
+	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+	return RTE_FLOW_ITEM_TYPE_VOID;
+}
+
+/* Map a JSON action-name string to its rte_flow action type.
+ * Only "vxlan_encap" is recognized; anything else is logged and mapped
+ * to RTE_FLOW_ACTION_TYPE_VOID.
+ */
+static enum rte_flow_action_type
+cpfl_get_action_type_by_str(const char *type)
+{
+	if (strcmp(type, "vxlan_encap") != 0) {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return RTE_FLOW_ACTION_TYPE_VOID;
+	}
+
+	return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+}
+
+/* Look up member 'name' in 'object' and return its string value, or NULL
+ * if the object or the member is missing. The returned pointer is owned
+ * by the json_object; do not free it.
+ */
+static const char *
+cpfl_json_object_to_string(json_object *object, const char *name)
+{
+	json_object *subobject;
+
+	if (!object) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return NULL;
+	}
+	subobject = json_object_object_get(object, name);
+	if (!subobject) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		/* Fix: return NULL (not the integer literal 0) for a pointer. */
+		return NULL;
+	}
+	return json_object_get_string(subobject);
+}
+
+/* Read integer member 'name' from 'object' into *value.
+ * Returns 0 on success, -EINVAL if the object or member is missing.
+ */
+static int
+cpfl_json_object_to_int(json_object *object, const char *name, int *value)
+{
+	json_object *sub;
+
+	if (object == NULL) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+
+	sub = json_object_object_get(object, name);
+	if (sub == NULL) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+
+	*value = json_object_get_int(sub);
+
+	return 0;
+}
+
+/* Read integer member 'name' from 'object' into *value as uint16_t
+ * (the json-c int is truncated to 16 bits).
+ * Returns 0 on success, -EINVAL if the object or member is missing.
+ */
+static int
+cpfl_json_object_to_uint16(json_object *object, const char *name, uint16_t *value)
+{
+	json_object *sub;
+
+	if (object == NULL) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+
+	sub = json_object_object_get(object, name);
+	if (sub == NULL) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+
+	*value = json_object_get_int(sub);
+
+	return 0;
+}
+
+/* Read integer member 'name' from 'object' into *value as uint32_t
+ * (fetched as int64 then truncated to 32 bits).
+ * Returns 0 on success, -EINVAL if the object or member is missing.
+ */
+static int
+cpfl_json_object_to_uint32(json_object *object, const char *name, uint32_t *value)
+{
+	json_object *sub;
+
+	if (object == NULL) {
+		PMD_DRV_LOG(ERR, "object doesn't exist.");
+		return -EINVAL;
+	}
+
+	sub = json_object_object_get(object, name);
+	if (sub == NULL) {
+		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
+		return -EINVAL;
+	}
+
+	*value = json_object_get_int64(sub);
+
+	return 0;
+}
+
+/* Parse the pattern rule's "attributes" array (e.g. ingress/egress flags)
+ * into js_pr->key.attributes. Returns 0 on success, negative errno on
+ * failure (the attributes allocation is freed on every error path).
+ * NOTE(review): a single cpfl_flow_js_pr_key_attr struct is allocated while
+ * attr_size is set to the array length; all iterations write into the same
+ * struct (its ingress/egress members). Confirm consumers never index
+ * key.attributes as an array of attr_size elements.
+ */
+static int
+cpfl_flow_js_pattern_key_attr(json_object *cjson_pr_key_attr, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len;
+	struct cpfl_flow_js_pr_key_attr *attr;
+
+	len = json_object_array_length(cjson_pr_key_attr);
+	js_pr->key.attributes = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_attr), 0);
+	if (!js_pr->key.attributes) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->key.attr_size = len;
+	attr = js_pr->key.attributes;
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		const char *name;
+		uint16_t value = 0;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr_key_attr, i);
+		name = cpfl_json_object_to_string(object, "Name");
+		if (!name) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse string 'Name'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(object, "Value", &value);
+		if (ret < 0) {
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Can not parse 'value'.");
+			return -EINVAL;
+		}
+		if (strcmp(name, "ingress") == 0) {
+			attr->ingress = value;
+		} else if (strcmp(name, "egress") == 0) {
+			attr->egress = value;
+		} else {
+			/* TODO: more... */
+			rte_free(js_pr->key.attributes);
+			PMD_DRV_LOG(ERR, "Not support attr name: %s.", name);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse one protocol's "fields" array into js_field->fields. For ETH/IPV4
+ * protocols the mask is a string; for all others it is a uint32.
+ * A NULL/absent 'cjson_field' is treated as success with fields untouched.
+ * Returns 0 on success, negative errno on failure (frees js_field->fields
+ * on every error path).
+ * NOTE(review): memcpy copies strlen() bytes without a NUL terminator into
+ * buffers obtained from rte_malloc (which does not zero memory) -- confirm
+ * the name/mask buffers are zeroed or large enough elsewhere, otherwise the
+ * stored strings may be unterminated. Also, the mask string's length is not
+ * bounded before copying, unlike 'name'.
+ */
+static int
+cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
+				     struct cpfl_flow_js_pr_key_proto *js_field)
+{
+	if (cjson_field) {
+		int len, i;
+
+		len = json_object_array_length(cjson_field);
+		js_field->fields_size = len;
+		if (len == 0)
+			return 0;
+		js_field->fields =
+		    rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto_field) * len, 0);
+		if (!js_field->fields) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+			return -ENOMEM;
+		}
+		for (i = 0; i < len; i++) {
+			json_object *object;
+			const char *name, *mask;
+
+			object = json_object_array_get_idx(cjson_field, i);
+			name = cpfl_json_object_to_string(object, "name");
+			if (!name) {
+				rte_free(js_field->fields);
+				PMD_DRV_LOG(ERR, "Can not parse string 'name'.");
+				return -EINVAL;
+			}
+			if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
+				rte_free(js_field->fields);
+				PMD_DRV_LOG(ERR, "The 'name' is too long.");
+				return -EINVAL;
+			}
+			memcpy(js_field->fields[i].name, name, strlen(name));
+
+			if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				mask = cpfl_json_object_to_string(object, "mask");
+				if (!mask) {
+					rte_free(js_field->fields);
+					PMD_DRV_LOG(ERR, "Can not parse string 'mask'.");
+					return -EINVAL;
+				}
+				memcpy(js_field->fields[i].mask, mask, strlen(mask));
+			} else {
+				uint32_t mask_32b;
+				int ret;
+
+				ret = cpfl_json_object_to_uint32(object, "mask", &mask_32b);
+				if (ret < 0) {
+					rte_free(js_field->fields);
+					PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'.");
+					return -EINVAL;
+				}
+				js_field->fields[i].mask_32b = mask_32b;
+			}
+		}
+	}
+	return 0;
+}
+
+/* Parse the pattern rule's "protocols" array: each entry has a "type"
+ * (mapped to an rte_flow item type) and an optional "fields" array.
+ * Returns 0 on success, negative errno on failure.
+ * NOTE(review): on a failure in iteration i > 0, only the protocols array
+ * itself is freed; the 'fields' allocations made for earlier iterations
+ * leak -- confirm whether a deeper cleanup is needed here.
+ */
+static int
+cpfl_flow_js_pattern_key_proto(json_object *cjson_pr_key_proto, struct cpfl_flow_js_pr *js_pr)
+{
+	int len, i, ret;
+
+	len = json_object_array_length(cjson_pr_key_proto);
+	js_pr->key.proto_size = len;
+	js_pr->key.protocols = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_key_proto) * len, 0);
+	if (!js_pr->key.protocols) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_key_proto_fields;
+		const char *type;
+		enum rte_flow_item_type item_type;
+
+		object = json_object_array_get_idx(cjson_pr_key_proto, i);
+		/* pr->key->proto->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_pr->key.protocols);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		item_type = cpfl_get_item_type_by_str(type);
+		if (item_type == RTE_FLOW_ITEM_TYPE_VOID) {
+			rte_free(js_pr->key.protocols);
+			return -EINVAL;
+		}
+		js_pr->key.protocols[i].type = item_type;
+		/* pr->key->proto->fields */
+		cjson_pr_key_proto_fields = json_object_object_get(object, "fields");
+		ret = cpfl_flow_js_pattern_key_proto_field(cjson_pr_key_proto_fields,
+							   &js_pr->key.protocols[i]);
+		if (ret < 0) {
+			rte_free(js_pr->key.protocols);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/* Parse one "protocol"-type field-vector value object with members
+ * 'layer', 'header', 'offset' and 'mask' into js_fv->proto.
+ * Returns 0 on success, -EINVAL on any missing/invalid member; js_fv is
+ * only written once all members have been validated.
+ */
+static int
+cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct cpfl_flow_js_fv *js_fv)
+{
+	uint16_t layer = 0, offset = 0, mask = 0;
+	const char *header;
+	enum rte_flow_item_type type;
+	int ret;
+
+	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
+	if (ret < 0) {
+		/* Fix: the original message wrongly referred to 'value'. */
+		PMD_DRV_LOG(ERR, "Can not parse 'layer'.");
+		return -EINVAL;
+	}
+
+	header = cpfl_json_object_to_string(cjson_value, "header");
+	if (!header) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+		return -EINVAL;
+	}
+	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
+		return -EINVAL;
+	}
+	type = cpfl_get_item_type_by_str(header);
+	if (type == RTE_FLOW_ITEM_TYPE_VOID)
+		return -EINVAL;
+
+	/* Commit the output only after full validation (the original wrote
+	 * layer/offset/mask before checking 'header').
+	 */
+	js_fv->proto.layer = layer;
+	js_fv->proto.offset = offset;
+	js_fv->proto.mask = mask;
+	js_fv->proto.header = type;
+	return 0;
+}
+
+/* Parse a SEM action's "fieldvectors" array into js_act->sem.fv. Each
+ * entry is either an "immediate" value or a "protocol" reference.
+ * Returns 0 on success, negative errno on failure (frees js_act->sem.fv
+ * on every error path).
+ */
+static int
+cpfl_flow_js_pattern_act_fv(json_object *cjson_fv, struct cpfl_flow_js_pr_action *js_act)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_fv);
+	js_act->sem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
+	if (!js_act->sem.fv) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_act->sem.fv_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_fv *js_fv;
+		json_object *object, *cjson_value;
+		uint16_t offset = 0;
+		const char *type;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_fv, i);
+		js_fv = &js_act->sem.fv[i];
+		ret = cpfl_json_object_to_uint16(object, "offset", &offset);
+		if (ret < 0) {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			return -EINVAL;
+		}
+		js_fv->offset = offset;
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		cjson_value = json_object_object_get(object, "value");
+		if (strcmp(type, "immediate") == 0) {
+			js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
+			js_fv->immediate = json_object_get_int(cjson_value);
+		} else if (strcmp(type, "protocol") == 0) {
+			js_fv->type = CPFL_FV_TYPE_PROTOCOL;
+			/* Fix: the return value of the proto parser was
+			 * silently ignored; propagate failures.
+			 */
+			ret = cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
+			if (ret < 0) {
+				rte_free(js_act->sem.fv);
+				return ret;
+			}
+		} else {
+			rte_free(js_act->sem.fv);
+			PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse one entry of a pattern rule's "actions" array. Only the "sem"
+ * action type is supported: its "data" object carries profile, subprofile,
+ * keysize and a "fieldvectors" array.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_pattern_per_act(json_object *cjson_per_act, struct cpfl_flow_js_pr_action *js_act)
+{
+	const char *type;
+	int ret;
+
+	/* pr->actions->type */
+	type = cpfl_json_object_to_string(cjson_per_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+	/* pr->actions->data */
+	if (strcmp(type, "sem") == 0) {
+		json_object *cjson_fv, *cjson_pr_action_sem;
+
+		js_act->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+		cjson_pr_action_sem = json_object_object_get(cjson_per_act, "data");
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "profile",
+						 &js_act->sem.prof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "subprofile",
+						 &js_act->sem.subprof);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'subprofile'.");
+			return -EINVAL;
+		}
+		ret = cpfl_json_object_to_uint16(cjson_pr_action_sem, "keysize",
+						 &js_act->sem.keysize);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
+			return -EINVAL;
+		}
+		cjson_fv = json_object_object_get(cjson_pr_action_sem, "fieldvectors");
+		ret = cpfl_flow_js_pattern_act_fv(cjson_fv, js_act);
+		if (ret < 0)
+			return ret;
+	} else {
+		PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Parse a pattern rule's "actions" array into js_pr->actions.
+ * Returns 0 on success, negative errno on failure.
+ * NOTE(review): on a failure at index i > 0, only the actions array is
+ * freed; the sem.fv allocations of earlier entries leak -- confirm whether
+ * a per-entry cleanup is needed.
+ */
+static int
+cpfl_flow_js_pattern_act(json_object *cjson_pr_act, struct cpfl_flow_js_pr *js_pr)
+{
+	int i, len, ret;
+
+	len = json_object_array_length(cjson_pr_act);
+	js_pr->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr_action) * len, 0);
+	if (!js_pr->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_pr->actions_size = len;
+	for (i = 0; i < len; i++) {
+		struct cpfl_flow_js_pr_action *js_act;
+		json_object *object;
+
+		object = json_object_array_get_idx(cjson_pr_act, i);
+		js_act = &js_pr->actions[i];
+		ret = cpfl_flow_js_pattern_per_act(object, js_act);
+		if (ret < 0) {
+			rte_free(js_pr->actions);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse the top-level "patterns" array of the flow JSON file into
+ * parser->patterns. Each pattern has a "key" (protocols + attributes)
+ * and an "actions" array. The "patterns" member is mandatory.
+ * Returns 0 on success, negative errno on failure.
+ * NOTE(review): error paths free only parser->patterns; nested allocations
+ * made by the key/attr/action sub-parsers for earlier iterations leak --
+ * confirm whether a full unwind is required.
+ */
+static int
+cpfl_flow_js_pattern_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_pr;
+	int i, len;
+
+	/* Pattern Rules */
+	cjson_pr = json_object_object_get(json_root, "patterns");
+	if (!cjson_pr) {
+		PMD_DRV_LOG(ERR, "The patterns is mandatory.");
+		return -EINVAL;
+	}
+
+	len = json_object_array_length(cjson_pr);
+	parser->patterns = rte_malloc(NULL, sizeof(struct cpfl_flow_js_pr) * len, 0);
+	if (!parser->patterns) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	parser->pr_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_pr_actions, *cjson_pr_key, *cjson_pr_key_proto,
+		    *cjson_pr_key_attr;
+		int ret;
+
+		object = json_object_array_get_idx(cjson_pr, i);
+		/* pr->key */
+		cjson_pr_key = json_object_object_get(object, "key");
+		/* pr->key->protocols */
+		cjson_pr_key_proto = json_object_object_get(cjson_pr_key, "protocols");
+		ret = cpfl_flow_js_pattern_key_proto(cjson_pr_key_proto, &parser->patterns[i]);
+		if (ret < 0) {
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse key->protocols.");
+			return -EINVAL;
+		}
+		/* pr->key->attributes */
+		cjson_pr_key_attr = json_object_object_get(cjson_pr_key, "attributes");
+		ret = cpfl_flow_js_pattern_key_attr(cjson_pr_key_attr, &parser->patterns[i]);
+		if (ret < 0) {
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse key->attributes.");
+			return -EINVAL;
+		}
+		/* pr->actions */
+		cjson_pr_actions = json_object_object_get(object, "actions");
+		ret = cpfl_flow_js_pattern_act(cjson_pr_actions, &parser->patterns[i]);
+		if (ret < 0) {
+			rte_free(parser->patterns);
+			PMD_DRV_LOG(ERR, "Can not parse pattern action.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse a modification rule's "key": an array of rte_flow action matchers.
+ * Only VXLAN_ENCAP (with an optional "protocols" list in its "data") is
+ * currently supported.
+ * Returns 0 on success, negative errno on failure; js_mr_key->actions is
+ * freed on every error path.
+ */
+static int
+cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key *js_mr_key)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_mr_key);
+	js_mr_key->actions = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_key_action) * len, 0);
+	if (!js_mr_key->actions) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	js_mr_key->actions_size = len;
+	for (i = 0; i < len; i++) {
+		json_object *object, *cjson_mr_key_data;
+		const char *type;
+		enum rte_flow_action_type act_type;
+
+		object = json_object_array_get_idx(cjson_mr_key, i);
+		/* mr->key->actions->type */
+		type = cpfl_json_object_to_string(object, "type");
+		if (!type) {
+			rte_free(js_mr_key->actions);
+			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+			return -EINVAL;
+		}
+		act_type = cpfl_get_action_type_by_str(type);
+		if (act_type == RTE_FLOW_ACTION_TYPE_VOID) {
+			rte_free(js_mr_key->actions);
+			return -EINVAL;
+		}
+		js_mr_key->actions[i].type = act_type;
+		/* mr->key->actions->data */
+		cjson_mr_key_data = json_object_object_get(object, "data");
+		if (js_mr_key->actions[i].type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			json_object *cjson_mr_key_proto;
+			int proto_size, j;
+			struct cpfl_flow_js_mr_key_action_vxlan_encap *encap;
+
+			cjson_mr_key_proto = json_object_object_get(cjson_mr_key_data, "protocols");
+			encap = &js_mr_key->actions[i].encap;
+			if (!cjson_mr_key_proto) {
+				encap->proto_size = 0;
+				continue;
+			}
+			proto_size = json_object_array_length(cjson_mr_key_proto);
+			/* NOTE(review): proto_size is not checked against the
+			 * capacity of encap->protocols -- confirm the array
+			 * can always hold this many entries.
+			 */
+			encap->proto_size = proto_size;
+			for (j = 0; j < proto_size; j++) {
+				const char *s;
+				json_object *subobject;
+				enum rte_flow_item_type proto_type;
+
+				subobject = json_object_array_get_idx(cjson_mr_key_proto, j);
+				s = json_object_get_string(subobject);
+				proto_type = cpfl_get_item_type_by_str(s);
+				if (proto_type == RTE_FLOW_ITEM_TYPE_VOID) {
+					rte_free(js_mr_key->actions);
+					PMD_DRV_LOG(ERR, "parse VXLAN_ENCAP failed.");
+					return -EINVAL;
+				}
+				encap->protocols[j] = proto_type;
+			}
+
+		} else {
+			PMD_DRV_LOG(ERR, "not support this type: %d.", js_mr_key->actions[i].type);
+			/* Fix: this error path leaked js_mr_key->actions. */
+			rte_free(js_mr_key->actions);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Parse a mod action's "layout" array (index/size/offset/hint tuples)
+ * into js_mod->layout. An empty array is accepted (layout left NULL).
+ * Returns 0 on success, negative errno on failure (frees js_mod->layout
+ * on every error path).
+ * NOTE(review): the hint string is memcpy'd without a NUL terminator into
+ * memory from rte_malloc (not zeroed), and its length is not bounded --
+ * confirm the hint buffer's size and termination requirements.
+ */
+static int
+cpfl_flow_js_mr_layout(json_object *cjson_layout, struct cpfl_flow_js_mr_action_mod *js_mod)
+{
+	int len, i;
+
+	len = json_object_array_length(cjson_layout);
+	js_mod->layout_size = len;
+	if (len == 0)
+		return 0;
+	js_mod->layout = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr_layout) * len, 0);
+	if (!js_mod->layout) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		json_object *object;
+		int index = 0, size = 0, offset = 0, ret;
+		const char *hint;
+
+		object = json_object_array_get_idx(cjson_layout, i);
+		ret = cpfl_json_object_to_int(object, "index", &index);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'index'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].index = index;
+		ret = cpfl_json_object_to_int(object, "size", &size);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'size'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].size = size;
+		ret = cpfl_json_object_to_int(object, "offset", &offset);
+		if (ret < 0) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
+			return -EINVAL;
+		}
+		js_mod->layout[i].offset = offset;
+		hint = cpfl_json_object_to_string(object, "hint");
+		if (!hint) {
+			rte_free(js_mod->layout);
+			PMD_DRV_LOG(ERR, "Can not parse string 'hint'.");
+			return -EINVAL;
+		}
+		memcpy(js_mod->layout[i].hint, hint, strlen(hint));
+	}
+
+	return 0;
+}
+
+/* Parse one modification rule's "action" object ("type" + "data").
+ * Only type "mod" is supported; fills js_mr_act accordingly.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct cpfl_flow_js_mr_action *js_mr_act)
+{
+	json_object *cjson_mr_action_data;
+	const char *type;
+
+	/* mr->action->type */
+	type = cpfl_json_object_to_string(cjson_mr_act, "type");
+	if (!type) {
+		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
+		return -EINVAL;
+	}
+
+	/* mr->action->data */
+	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
+	/* Robustness: json_object_object_get() returns NULL for a missing key;
+	 * the original handed that NULL straight to the field accessors.
+	 */
+	if (!cjson_mr_action_data) {
+		PMD_DRV_LOG(ERR, "Can not parse 'data'.");
+		return -EINVAL;
+	}
+	if (strcmp(type, "mod") == 0) {
+		json_object *layout;
+		uint16_t profile = 0;
+		int ret;
+
+		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+		ret = cpfl_json_object_to_uint16(cjson_mr_action_data, "profile", &profile);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
+			return -EINVAL;
+		}
+		js_mr_act->mod.prof = profile;
+		layout = json_object_object_get(cjson_mr_action_data, "layout");
+		if (!layout) {
+			/* "layout" is optional -- cpfl_parse_mr_action() treats a
+			 * NULL layout as "no data"; make that state explicit
+			 * instead of passing NULL to the json array API.
+			 */
+			js_mr_act->mod.layout = NULL;
+			js_mr_act->mod.layout_size = 0;
+			return 0;
+		}
+		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Can not parse layout.");
+			return ret;
+		}
+	} else  {
+		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Parse the optional top-level "modifications" array into
+ * parser->modifications / parser->mr_size.
+ * Returns 0 on success (including when the array is absent or empty),
+ * negative errno on failure; on failure all sub-allocations of already
+ * parsed entries are released and the output fields are reset.
+ */
+static int
+cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	json_object *cjson_mr;
+	int i, len;
+
+	cjson_mr = json_object_object_get(json_root, "modifications");
+	if (!cjson_mr) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+
+	len = json_object_array_length(cjson_mr);
+	parser->mr_size = len;
+	if (len == 0)
+		return 0;
+	parser->modifications = rte_malloc(NULL, sizeof(struct cpfl_flow_js_mr) * len, 0);
+	if (!parser->modifications) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	/* rte_malloc() does not zero the buffer; zero it so that pointers of
+	 * not-yet-parsed entries are NULL and can safely be freed on the
+	 * error path (and by cpfl_parser_destroy()).
+	 */
+	memset(parser->modifications, 0, sizeof(struct cpfl_flow_js_mr) * len);
+
+	for (i = 0; i < len; i++) {
+		int ret;
+		json_object *object, *cjson_mr_key, *cjson_mr_action, *cjson_mr_key_action;
+
+		object = json_object_array_get_idx(cjson_mr, i);
+		/* mr->key */
+		cjson_mr_key = json_object_object_get(object, "key");
+		/* mr->key->actions */
+		cjson_mr_key_action = json_object_object_get(cjson_mr_key, "actions");
+
+		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser->modifications[i].key);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_key failed.");
+			goto err;
+		}
+		/* mr->action */
+		cjson_mr_action = json_object_object_get(object, "action");
+		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser->modifications[i].action);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "parse mr_action failed.");
+			rte_free(parser->modifications[i].key.actions);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	/* BUGFIX: the original freed only the top-level array, leaking the
+	 * key.actions / mod.layout of already parsed entries and leaving a
+	 * dangling parser->modifications pointer behind.
+	 */
+	while (--i >= 0) {
+		rte_free(parser->modifications[i].key.actions);
+		if (parser->modifications[i].action.type == CPFL_JS_MR_ACTION_TYPE_MOD)
+			rte_free(parser->modifications[i].action.mod.layout);
+	}
+	rte_free(parser->modifications);
+	parser->modifications = NULL;
+	parser->mr_size = 0;
+	return -EINVAL;
+}
+
+/* Populate 'parser' from the loaded JSON tree: mandatory pattern rules
+ * first, then the optional modification rules.
+ * Returns 0 on success, negative errno on the first failing stage.
+ */
+static int
+cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
+{
+	int ret;
+
+	ret = cpfl_flow_js_pattern_rule(json_root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
+		return ret;
+	}
+
+	ret = cpfl_flow_js_mod_rule(json_root, parser);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
+
+	return ret;
+}
+
+/* Load the flow parser configuration from the JSON file 'filename' and
+ * return a newly allocated parser through 'flow_parser'.
+ * Returns 0 on success, -ENOMEM/-EINVAL on failure (nothing is leaked).
+ */
+int
+cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char *filename)
+{
+	struct cpfl_flow_js_parser *parser;
+	json_object *root;
+	int ret;
+
+	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser), 0);
+	if (!parser) {
+		PMD_DRV_LOG(ERR, "Not enough memory to create flow parser.");
+		return -ENOMEM;
+	}
+	root = json_object_from_file(filename);
+	if (!root) {
+		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	ret = cpfl_parser_init(root, parser);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "parser init failed.");
+		/* BUGFIX: the original leaked the JSON tree on this path. */
+		json_object_put(root);
+		rte_free(parser);
+		return -EINVAL;
+	}
+	*flow_parser = parser;
+
+	/* json_object_put() returns 1 when the object was freed. */
+	ret = json_object_put(root);
+	if (ret != 1) {
+		PMD_DRV_LOG(ERR, "Free json_object failed.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Release the heap memory owned by one pattern-rule action. */
+static void
+cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
+{
+	/* Only SEM actions own heap memory; rte_free() is a no-op on NULL,
+	 * so the former inner NULL guard was redundant.
+	 */
+	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
+		rte_free(pr_act->sem.fv);
+}
+
+/* Free a parser created by cpfl_parser_create() and everything it owns.
+ * Safe to call with NULL. Always returns 0.
+ */
+int
+cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
+{
+	int i, j;
+
+	/* Robustness: tolerate a NULL parser like free() would. */
+	if (!parser)
+		return 0;
+
+	/* rte_free() is a no-op on NULL, so the former per-pointer NULL
+	 * guards were redundant and have been dropped throughout.
+	 */
+	for (i = 0; i < parser->pr_size; i++) {
+		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
+
+		for (j = 0; j < pattern->key.proto_size; j++)
+			rte_free(pattern->key.protocols[j].fields);
+		rte_free(pattern->key.protocols);
+		rte_free(pattern->key.attributes);
+
+		for (j = 0; j < pattern->actions_size; j++)
+			cpfl_parser_free_pr_action(&pattern->actions[j]);
+		rte_free(pattern->actions);
+	}
+	rte_free(parser->patterns);
+
+	for (i = 0; i < parser->mr_size; i++) {
+		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
+
+		rte_free(mr->key.actions);
+		if (mr->action.type == CPFL_JS_MR_ACTION_TYPE_MOD)
+			rte_free(mr->action.mod.layout);
+	}
+	rte_free(parser->modifications);
+
+	rte_free(parser);
+	return 0;
+}
+
+/* Return the number of entries in 'items', counting the terminating
+ * RTE_FLOW_ITEM_TYPE_END entry itself.
+ */
+static int
+cpfl_get_items_length(const struct rte_flow_item *items)
+{
+	int count = 1;
+
+	while (items[count - 1].type != RTE_FLOW_ITEM_TYPE_END)
+		count++;
+	return count;
+}
+
+/* Return the number of entries in 'actions', counting the terminating
+ * RTE_FLOW_ACTION_TYPE_END entry itself.
+ */
+static int
+cpfl_get_actions_length(const struct rte_flow_action *actions)
+{
+	int count = 1;
+
+	while (actions[count - 1].type != RTE_FLOW_ACTION_TYPE_END)
+		count++;
+	return count;
+}
+
+/* Fill two bytes of the field-vector key buffer 'fv' (at 16-bit slot
+ * 'offset') from the matching protocol item described by js_fv->proto:
+ * the v_layer-th occurrence of item type v_header, 16 bits taken at byte
+ * offset v_offset of its spec, ANDed with v_mask.
+ * Returns 0 on success (also when no matching item exists -- the slot is
+ * then left untouched, as in the original), -EINVAL on a NULL spec.
+ */
+static int
+cpfl_parse_fv_protocol(struct cpfl_flow_js_fv *js_fv, const struct rte_flow_item *items,
+		       uint16_t offset, uint8_t *fv)
+{
+	uint16_t v_layer, v_offset, v_mask;
+	enum rte_flow_item_type v_header;
+	int j, layer, length;
+	uint16_t temp_fv;
+
+	length = cpfl_get_items_length(items);
+
+	v_layer = js_fv->proto.layer;
+	v_header = js_fv->proto.header;
+	v_offset = js_fv->proto.offset;
+	v_mask = js_fv->proto.mask;
+	layer = 0;
+	for (j = 0; j < length - 1; j++) {
+		if (items[j].type != v_header)
+			continue;	/* TODO: more type... */
+		if (layer == v_layer) {
+			uint16_t raw;
+
+			/* Robustness: the item spec comes from the application
+			 * and may be NULL; the original dereferenced it blindly.
+			 */
+			if (!items[j].spec)
+				return -EINVAL;
+			/* BUGFIX: copy out 16 bits via memcpy instead of a
+			 * casted dereference -- the byte offset may be
+			 * unaligned and the cast also violated strict aliasing.
+			 */
+			memcpy(&raw, (const uint8_t *)items[j].spec + v_offset, sizeof(raw));
+			temp_fv = ntohs(raw & v_mask);
+			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+			break;
+		}
+		layer++;
+	}
+	return 0;
+}
+
+/* Build the SEM lookup key: write each JSON field vector (immediate value
+ * or protocol header extract) into the byte buffer 'fv', two bytes per
+ * entry at 16-bit slot js_fv->offset.
+ * NOTE(review): 'offset' is trusted from the JSON file; writes land at
+ * fv[2 * offset + 1] -- confirm offsets are bounded by the fv buffer size
+ * (CPFL_MAX_SEM_FV_KEY_SIZE) by the JSON loader.
+ * Returns 0 on success, negative errno on an unsupported fv type.
+ */
+static int
+cpfl_parse_fieldvectors(struct cpfl_flow_js_fv *js_fvs, int size, uint8_t *fv,
+			const struct rte_flow_item *items)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		uint16_t offset, temp_fv, value_int;
+		enum cpfl_flow_js_fv_type type;
+		struct cpfl_flow_js_fv *js_fv;
+
+		js_fv = &js_fvs[i];
+		offset = js_fv->offset;
+		type = js_fv->type;
+		/* type = int */
+		if (type == CPFL_FV_TYPE_IMMEDIATE) {
+			/* Immediate constant: stored in the high byte of the
+			 * 16-bit slot (value << 8), big-endian in the buffer.
+			 */
+			value_int = js_fv->immediate;
+			temp_fv = (value_int << 8) & 0xff00;
+			fv[2 * offset] = (uint8_t)((temp_fv & 0xff00) >> 8);
+			fv[2 * offset + 1] = (uint8_t)(temp_fv & 0x00ff);
+		} else if (type == CPFL_FV_TYPE_PROTOCOL) {
+			/* Extracted from a matched rte_flow item's spec. */
+			ret = cpfl_parse_fv_protocol(js_fv, items, offset, fv);
+			if (ret)
+				return ret;
+		} else {
+			PMD_DRV_LOG(DEBUG, "not support this type: %d.", type);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* Translate the matched JSON pattern rule's actions into 'pr_action'.
+ * Only the first applicable SEM action is used; its field vectors are
+ * resolved against 'items'. Returns 0 on success, -EPERM for a flow group
+ * the rule set does not serve, negative errno otherwise.
+ */
+static int
+cpfl_parse_pr_actions(struct cpfl_flow_js_pr_action *actions,
+		      int size,
+		      const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	int i, ret;
+
+	for (i = 0; i < size; i++) {
+		struct cpfl_flow_js_pr_action *pr_act;
+		enum cpfl_flow_pr_action_type type;
+
+		pr_act = &actions[i];
+		/* pr->actions->type */
+		type = pr_act->type;
+		/* pr->actions->data */
+		/* NOTE(review): 'group % 10 == 1' and the 'group > 4 ||
+		 * group == 0' rejection encode a convention about which
+		 * rte_flow groups carry SEM rules -- confirm against the cpfl
+		 * flow-group numbering scheme; not derivable from this file.
+		 */
+		if (attr->group % 10 == 1  && type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+			struct cpfl_flow_js_pr_action_sem *sem = &pr_act->sem;
+
+			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
+			pr_action->sem.prof = sem->prof;
+			pr_action->sem.subprof = sem->subprof;
+			pr_action->sem.keysize = sem->keysize;
+			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
+			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
+			ret = cpfl_parse_fieldvectors(sem->fv, sem->fv_size,
+						      pr_action->sem.cpfl_flow_pr_fv, items);
+			return ret;
+		} else if (attr->group > 4 || attr->group == 0) {
+			return -EPERM;
+		}
+	}
+	return 0;
+}
+
+/* Parse a colon-separated MAC address string (e.g. "ff:ff:ff:ff:ff:ff")
+ * into addr_bytes[RTE_ETHER_ADDR_LEN].
+ * Returns 0 on success, -EINVAL on malformed input.
+ */
+static int
+str2MAC(const char *mask, uint8_t *addr_bytes)
+{
+	int i, size, j;
+	uint8_t n;
+	int digits;	/* hex digits seen in the current ':'-separated group */
+
+	size = strlen(mask);
+	n = 0;
+	j = 0;
+	digits = 0;
+	for (i = 0; i < size; i++) {
+		char ch = mask[i];
+
+		if (ch == ':') {
+			/* BUGFIX: reject empty groups ("::"); the original
+			 * silently treated them as 0.
+			 */
+			if (j >= RTE_ETHER_ADDR_LEN || digits == 0)
+				return -EINVAL;
+			addr_bytes[j++] = n;
+			n = 0;
+			digits = 0;
+			continue;
+		}
+		/* BUGFIX: a byte holds at most two hex digits; the original
+		 * let 'n' silently wrap on longer groups.
+		 */
+		if (digits == 2)
+			return -EINVAL;
+		if (ch >= 'a' && ch <= 'f')
+			n = n * 16 + ch - 'a' + 10;
+		else if (ch >= 'A' && ch <= 'F')
+			n = n * 16 + ch - 'A' + 10;
+		else if (ch >= '0' && ch <= '9')
+			n = n * 16 + ch - '0';
+		else
+			return -EINVAL;
+		digits++;
+	}
+	/* Flush the final group (no trailing ':'). */
+	if (j < RTE_ETHER_ADDR_LEN) {
+		if (digits == 0)
+			return -EINVAL;
+		addr_bytes[j++] = n;
+	}
+
+	if (j != RTE_ETHER_ADDR_LEN)
+		return -EINVAL;
+	return 0;
+}
+
+/* Check that the textual MAC mask from the JSON file equals the 6-byte
+ * mask supplied in the rte_flow item.
+ * Returns 0 on match, -EINVAL on parse failure or mismatch.
+ */
+static int
+cpfl_check_eth_mask(const char *mask, const uint8_t addr_bytes[RTE_ETHER_ADDR_LEN])
+{
+	uint8_t mask_bytes[RTE_ETHER_ADDR_LEN] = {0};
+	int ret;
+
+	ret = str2MAC(mask, mask_bytes);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "string to mac address failed.");
+		return -EINVAL;
+	}
+	/* Idiom: memcmp replaces the hand-rolled byte-compare loop. */
+	if (memcmp(mask_bytes, addr_bytes, RTE_ETHER_ADDR_LEN) != 0)
+		return -EINVAL;
+	return 0;
+}
+
+/* Check that the dotted-quad IPv4 mask string from the JSON file equals
+ * the big-endian mask supplied in the rte_flow item.
+ * Returns 0 on match, -EINVAL on malformed input or mismatch.
+ */
+static int
+cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
+{
+	uint32_t out_addr;
+	int ret;
+
+	/* inet_pton() returns 1 on success, 0 when 'mask' is not a valid
+	 * dotted-quad string, and -1 on an address-family error.
+	 * BUGFIX: the original only rejected ret < 0, so a malformed mask
+	 * (ret == 0) fell through and compared the *uninitialized* out_addr
+	 * -- undefined behavior.
+	 */
+	ret = inet_pton(AF_INET, mask, &out_addr);
+	if (ret != 1)
+		return -EINVAL;
+
+	/* Both values are in network byte order; compare directly. */
+	if (out_addr != addr)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the rte_flow ETH item mask against the JSON rule's "eth"
+ * protocol description: every field named in the JSON must match exactly,
+ * and every field NOT named must be fully masked out (all zero).
+ * Returns 0 on match, -EINVAL otherwise; a NULL 'proto' always matches.
+ */
+static int
+cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_eth *eth_mask)
+{
+	static const uint8_t zero_addr_bytes[RTE_ETHER_ADDR_LEN];
+	int field_size, j;
+	bool flag_dst_addr, flag_src_addr, flag_ether_type;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* The JSON fields array and the rte_flow mask must be both present
+	 * or both absent.
+	 */
+	field_size = proto->fields_size;
+	if (field_size != 0 && !eth_mask)
+		return -EINVAL;
+	if (field_size == 0 && eth_mask)
+		return -EINVAL;
+	if (field_size == 0 && !eth_mask)
+		return 0;
+
+	flag_dst_addr = false;
+	flag_src_addr = false;
+	flag_ether_type = false;
+	for (j = 0; j < field_size; j++) {
+		const char *name, *s_mask;
+
+		field = &proto->fields[j];
+		/* match: rte_flow_item_eth.dst, more see Field Mapping */
+		name = field->name;
+		/* match: rte_flow_item->mask */
+		if (strcmp(name, "src_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->src.addr_bytes) < 0)
+				return -EINVAL;
+			flag_src_addr = true;
+		} else if (strcmp(name, "dst_addr") == 0) {
+			s_mask = field->mask;
+			if (cpfl_check_eth_mask(s_mask, eth_mask->dst.addr_bytes) < 0)
+				return -EINVAL;
+			flag_dst_addr = true;
+		} else if (strcmp(name, "ether_type") == 0) {
+			uint16_t mask = (uint16_t)field->mask_32b;
+
+			if (mask != eth_mask->type)
+				return -EINVAL;
+			flag_ether_type = true;
+		} else {
+			/* TODO: more type... */
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* BUGFIX: the original used strcmp() on the raw 6-byte MAC masks,
+	 * which stops at the first zero byte and therefore wrongly accepted
+	 * masks such as 00:xx:xx:xx:xx:xx; compare all bytes with memcmp().
+	 */
+	if (!flag_src_addr &&
+	    memcmp(eth_mask->src.addr_bytes, zero_addr_bytes, RTE_ETHER_ADDR_LEN) != 0)
+		return -EINVAL;
+	if (!flag_dst_addr &&
+	    memcmp(eth_mask->dst.addr_bytes, zero_addr_bytes, RTE_ETHER_ADDR_LEN) != 0)
+		return -EINVAL;
+	if (!flag_ether_type && eth_mask->hdr.ether_type != (rte_be16_t)0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Validate the rte_flow IPv4 item mask against the JSON rule's "ipv4"
+ * protocol description: named fields must match exactly, unnamed fields
+ * must be fully masked out. A NULL 'proto' always matches.
+ */
+static int
+cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+	int nb_fields, idx;
+	bool seen_src, seen_dst, seen_proto_id;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* JSON fields and the rte_flow mask: both present or both absent. */
+	nb_fields = proto->fields_size;
+	if (nb_fields == 0)
+		return ipv4_mask ? -EINVAL : 0;
+	if (!ipv4_mask)
+		return -EINVAL;
+
+	seen_src = false;
+	seen_dst = false;
+	seen_proto_id = false;
+	for (idx = 0; idx < nb_fields; idx++) {
+		field = &proto->fields[idx];
+		if (strcmp(field->name, "src_addr") == 0) {
+			if (cpfl_check_ipv4_mask(field->mask, ipv4_mask->hdr.src_addr) < 0)
+				return -EINVAL;
+			seen_src = true;
+		} else if (strcmp(field->name, "dst_addr") == 0) {
+			if (cpfl_check_ipv4_mask(field->mask, ipv4_mask->hdr.dst_addr) < 0)
+				return -EINVAL;
+			seen_dst = true;
+		} else if (strcmp(field->name, "next_proto_id") == 0) {
+			if ((uint8_t)field->mask_32b != ipv4_mask->hdr.next_proto_id)
+				return -EINVAL;
+			seen_proto_id = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* Fields not named by the JSON rule must be fully masked out. */
+	if (!seen_src && ipv4_mask->hdr.src_addr != (rte_be32_t)0)
+		return -EINVAL;
+	if (!seen_dst && ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
+		return -EINVAL;
+	if (!seen_proto_id && ipv4_mask->hdr.next_proto_id != (uint8_t)0)
+		return -EINVAL;
+	return 0;
+}
+
+/* Validate the rte_flow TCP item mask against the JSON rule's "tcp"
+ * protocol description: named ports must match exactly, unnamed ports
+ * must be fully masked out. A NULL 'proto' always matches.
+ */
+static int
+cpfl_check_tcp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_tcp *tcp_mask)
+{
+	int nb_fields, idx;
+	bool seen_src_port, seen_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* JSON fields and the rte_flow mask: both present or both absent. */
+	nb_fields = proto->fields_size;
+	if (nb_fields == 0)
+		return tcp_mask ? -EINVAL : 0;
+	if (!tcp_mask)
+		return -EINVAL;
+
+	seen_src_port = false;
+	seen_dst_port = false;
+	for (idx = 0; idx < nb_fields; idx++) {
+		uint16_t mask16;
+
+		field = &proto->fields[idx];
+		mask16 = (uint16_t)field->mask_32b;
+		if (strcmp(field->name, "src_port") == 0) {
+			if (tcp_mask->hdr.src_port != mask16)
+				return -EINVAL;
+			seen_src_port = true;
+		} else if (strcmp(field->name, "dst_port") == 0) {
+			if (tcp_mask->hdr.dst_port != mask16)
+				return -EINVAL;
+			seen_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* Ports not named by the JSON rule must be fully masked out. */
+	if (!seen_src_port && tcp_mask->hdr.src_port != (rte_be16_t)0)
+		return -EINVAL;
+	if (!seen_dst_port && tcp_mask->hdr.dst_port != (rte_be16_t)0)
+		return -EINVAL;
+	return 0;
+}
+
+/* Validate the rte_flow UDP item mask against the JSON rule's "udp"
+ * protocol description: named ports must match exactly, unnamed ports
+ * must be fully masked out. A NULL 'proto' always matches.
+ */
+static int
+cpfl_check_udp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_udp *udp_mask)
+{
+	int nb_fields, idx;
+	bool seen_src_port, seen_dst_port;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* JSON fields and the rte_flow mask: both present or both absent. */
+	nb_fields = proto->fields_size;
+	if (nb_fields == 0)
+		return udp_mask ? -EINVAL : 0;
+	if (!udp_mask)
+		return -EINVAL;
+
+	seen_src_port = false;
+	seen_dst_port = false;
+	for (idx = 0; idx < nb_fields; idx++) {
+		uint16_t mask16;
+
+		field = &proto->fields[idx];
+		mask16 = (uint16_t)field->mask_32b;
+		if (strcmp(field->name, "src_port") == 0) {
+			if (udp_mask->hdr.src_port != mask16)
+				return -EINVAL;
+			seen_src_port = true;
+		} else if (strcmp(field->name, "dst_port") == 0) {
+			if (udp_mask->hdr.dst_port != mask16)
+				return -EINVAL;
+			seen_dst_port = true;
+		} else {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+	}
+	/* Ports not named by the JSON rule must be fully masked out. */
+	if (!seen_src_port && udp_mask->hdr.src_port != (rte_be16_t)0)
+		return -EINVAL;
+	if (!seen_dst_port && udp_mask->hdr.dst_port != (rte_be16_t)0)
+		return -EINVAL;
+	return 0;
+}
+
+/* Validate the rte_flow VXLAN item mask against the JSON rule's "vxlan"
+ * protocol description; only the "vx_vni" field is supported.
+ * A NULL 'proto' always matches.
+ */
+static int
+cpfl_check_vxlan(struct cpfl_flow_js_pr_key_proto *proto,
+		 const struct rte_flow_item_vxlan *vxlan_mask)
+{
+	int nb_fields, idx;
+	struct cpfl_flow_js_pr_key_proto_field *field;
+
+	if (!proto)
+		return 0;
+
+	/* JSON fields and the rte_flow mask: both present or both absent. */
+	nb_fields = proto->fields_size;
+	if (nb_fields == 0)
+		return vxlan_mask ? -EINVAL : 0;
+	if (!vxlan_mask)
+		return -EINVAL;
+
+	for (idx = 0; idx < nb_fields; idx++) {
+		int64_t mask_val;
+
+		field = &proto->fields[idx];
+		mask_val = (int64_t)field->mask_32b;
+		if (strcmp(field->name, "vx_vni") != 0) {
+			PMD_DRV_LOG(ERR, "not support this name.");
+			return -EINVAL;
+		}
+		if ((int64_t)RTE_BE32(vxlan_mask->hdr.vx_vni) != mask_val)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+/* Validate the rte_flow ICMP item mask against the JSON rule's "icmp"
+ * entry. No per-field matching exists for ICMP: only require that the
+ * JSON fields array and the rte_flow mask are both present or both absent.
+ */
+static int
+cpfl_check_icmp(struct cpfl_flow_js_pr_key_proto *proto, const struct rte_flow_item_icmp *icmp_mask)
+{
+	int nb_fields;
+
+	if (!proto)
+		return 0;
+
+	nb_fields = proto->fields_size;
+	if ((nb_fields != 0 && !icmp_mask) || (nb_fields == 0 && icmp_mask))
+		return -EINVAL;
+	return 0;
+}
+
+/* Check that the rte_flow item list matches this pattern rule's protocol
+ * list: same number of items (excluding END), same types in the same
+ * order, and each item's mask consistent with the JSON field description
+ * (delegated to the per-protocol cpfl_check_*() helpers).
+ * Returns 0 on match, -EINVAL on mismatch, -EPERM on an unsupported type.
+ */
+static int
+cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
+			     int proto_size,
+			     const struct rte_flow_item *items)
+{
+	int i, length, j = 0;
+
+	length = cpfl_get_items_length(items);
+
+	/* 'length' counts the terminating END item, hence "length - 1". */
+	if (proto_size > length - 1)
+		return -EINVAL;
+
+	/* NOTE: i and j advance in lockstep (j is incremented exactly once
+	 * per iteration), so inside each case i == j - 1 and items[i] is the
+	 * item whose type was just checked.
+	 */
+	for (i = 0; i < proto_size; i++) {
+		struct cpfl_flow_js_pr_key_proto *key_proto;
+		enum rte_flow_item_type type;
+
+		key_proto = &protocols[i];
+		/* pr->key->proto->type */
+		type = key_proto->type;
+		/* pr->key->proto->fields */
+		switch (type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
+				const struct rte_flow_item_eth *eth_mask;
+				int ret;
+
+				eth_mask = (const struct rte_flow_item_eth *)items[i].mask;
+				ret = cpfl_check_eth(key_proto, eth_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_IPV4) {
+				const struct rte_flow_item_ipv4 *ipv4_mask;
+				int ret;
+
+				ipv4_mask = (const struct rte_flow_item_ipv4 *)items[i].mask;
+				ret = cpfl_check_ipv4(key_proto, ipv4_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_TCP) {
+				const struct rte_flow_item_tcp *tcp_mask;
+				int ret;
+
+				tcp_mask = (const struct rte_flow_item_tcp *)items[i].mask;
+				ret = cpfl_check_tcp(key_proto, tcp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_UDP) {
+				const struct rte_flow_item_udp *udp_mask;
+				int ret;
+
+				udp_mask = (const struct rte_flow_item_udp *)items[i].mask;
+				ret = cpfl_check_udp(key_proto, udp_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+				const struct rte_flow_item_vxlan *vxlan_mask;
+				int ret;
+
+				vxlan_mask = (const struct rte_flow_item_vxlan *)items[i].mask;
+				ret = cpfl_check_vxlan(key_proto, vxlan_mask);
+				if (ret < 0)
+					return ret;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ICMP) {
+				const struct rte_flow_item_icmp *icmp_mask;
+				int ret;
+
+				icmp_mask = (const struct rte_flow_item_icmp *)items[i].mask;
+				ret = cpfl_check_icmp(key_proto, icmp_mask);
+				if (ret < 0)
+					return ret;
+
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+	/* The item list must be exactly consumed (no extra items before END). */
+	if (items[j].type != RTE_FLOW_ITEM_TYPE_END)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Check that the pattern rule's direction attributes equal the flow's
+ * rte_flow_attr direction bits. Returns 0 on match, -EINVAL otherwise.
+ */
+static int
+cpfl_check_pattern_key_attr(struct cpfl_flow_js_pr_key_attr *key_attr,
+			    const struct rte_flow_attr *attr)
+{
+	/* match: struct rte_flow_attr(ingress,egress) */
+	if (key_attr->ingress != attr->ingress) {
+		PMD_DRV_LOG(DEBUG, "ingress not match.");
+		return -EINVAL;
+	}
+	if (key_attr->egress != attr->egress) {
+		PMD_DRV_LOG(DEBUG, "egress not match.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Check whether 'pattern' applies to the flow being created: both its
+ * protocol list and its attributes must match.
+ * Returns 0 when the pattern matches, -EINVAL otherwise.
+ */
+static int
+cpfl_check_pattern_key(struct cpfl_flow_js_pr *pattern,
+		       const struct rte_flow_item *items,
+		       const struct rte_flow_attr *attr)
+{
+	/* pr->key->protocols */
+	if (cpfl_check_pattern_key_proto(pattern->key.protocols,
+					 pattern->key.proto_size, items) < 0)
+		return -EINVAL;
+	/* pr->key->attributes */
+	if (cpfl_check_pattern_key_attr(pattern->key.attributes, attr) < 0)
+		return -EINVAL;
+	return 0;
+}
+
+/* output: struct cpfl_flow_pr_action* pr_action */
+static int
+cpfl_parse_pattern_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+			 const struct rte_flow_attr *attr,
+			 struct cpfl_flow_pr_action *pr_action)
+{
+	int i, size;
+	struct cpfl_flow_js_pr *pattern;
+
+	size = parser->pr_size;
+	for (i = 0; i < size; i++) {
+		int ret;
+
+		pattern = &parser->patterns[i];
+		ret = cpfl_check_pattern_key(pattern, items, attr);
+		if (ret < 0)
+			continue;
+		/* pr->actions */
+		ret = cpfl_parse_pr_actions(pattern->actions, pattern->actions_size, items, attr,
+					    pr_action);
+		return ret;
+	}
+	return -EINVAL;
+}
+
+/* Public entry point for translating an rte_flow item list; only the
+ * pattern rules are involved. Fills 'pr_action' on success.
+ */
+int
+cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser, const struct rte_flow_item *items,
+		      const struct rte_flow_attr *attr,
+		      struct cpfl_flow_pr_action *pr_action)
+{
+	return cpfl_parse_pattern_rules(parser, items, attr, pr_action);
+}
+
+/* modifications rules */
+static int
+cpfl_check_actions_vxlan_encap(struct cpfl_flow_mr_key_action_vxlan_encap *encap,
+			       const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_vxlan_encap *action_vxlan_encap;
+	struct rte_flow_item *definition;
+	int def_length, i, proto_size;
+
+	action_vxlan_encap = (const struct rte_flow_action_vxlan_encap *)action->conf;
+	definition = action_vxlan_encap->definition;
+	def_length = cpfl_get_items_length(definition);
+	proto_size = encap->proto_size;
+	if (proto_size != def_length - 1) {
+		PMD_DRV_LOG(DEBUG, "protocols not match.");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < proto_size; i++) {
+		enum rte_flow_item_type proto;
+
+		proto = encap->protocols[i];
+		if (proto == RTE_FLOW_ITEM_TYPE_VLAN) {
+			if (definition[i].type != RTE_FLOW_ITEM_TYPE_VOID) {
+				PMD_DRV_LOG(DEBUG, "protocols not match.");
+				return -EINVAL;
+			}
+		} else if (proto != definition[i].type) {
+			PMD_DRV_LOG(DEBUG, "protocols not match.");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/* output: struct cpfl_flow_mr_key_action *mr_key_action */
+/* check and parse */
+/* Match the JSON modification-rule key actions against the rte_flow
+ * actions list, binding each JSON key action i to the next unclaimed
+ * matching rte_flow action and recording it in mr_key_action[i].
+ * NOTE(review): mr_key_action is indexed by i up to 'size', while the only
+ * caller passes an 8-entry array (CPFL_MOD_KEY_NUM_MAX) and 'size' comes
+ * from the JSON file -- confirm the JSON loader bounds actions_size to 8.
+ * Returns 0 on success, -EINVAL on mismatch, -EPERM on unsupported type.
+ */
+static int
+cpfl_parse_mr_key_action(struct cpfl_flow_js_mr_key_action *key_acts, int size,
+			 const struct rte_flow_action *actions,
+			 struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	int actions_length, i;
+	int j = 0;
+	int ret;
+
+	actions_length = cpfl_get_actions_length(actions);
+	/* 'actions_length' counts the terminating END entry. */
+	if (size > actions_length - 1)
+		return -EINVAL;
+
+	for (i = 0; i < size; i++) {
+		enum rte_flow_action_type type;
+		struct cpfl_flow_js_mr_key_action *key_act;
+
+		key_act = &key_acts[i];
+		/* mr->key->actions->type */
+		type = key_act->type;
+		/* mr->key->actions->data */
+		/* match: <type> action matches RTE_FLOW_ACTION_TYPE_<type> */
+
+		if (type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			int proto_size, k;
+			struct cpfl_flow_mr_key_action_vxlan_encap *encap;
+
+			/* j scans forward only, so repeated VXLAN_ENCAP key
+			 * actions bind to successive flow actions.
+			 */
+			while (j < actions_length &&
+			       actions[j].type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+				j++;
+			}
+			if (j >= actions_length)
+				return -EINVAL;
+
+			mr_key_action[i].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+			mr_key_action[i].encap.action = &actions[j];
+			encap = &mr_key_action[i].encap;
+
+			proto_size = key_act->encap.proto_size;
+			encap->proto_size = proto_size;
+			for (k = 0; k < proto_size; k++) {
+				enum rte_flow_item_type proto;
+
+				proto = key_act->encap.protocols[k];
+				encap->protocols[k] = proto;
+			}
+
+			/* Verify the bound flow action's item definition
+			 * agrees with the JSON protocol list.
+			 */
+			ret = cpfl_check_actions_vxlan_encap(encap, &actions[j]);
+			if (ret < 0)
+				return -EINVAL;
+
+			j++;
+		} else {
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+/* output: uint8_t *buffer, uint16_t *byte_len */
+/* Serialize the JSON "layout" descriptors into 'buffer', copying bytes
+ * from the rte_flow actions referenced by each descriptor's 'index'
+ * (index == -1 marks padding). The total length is returned through
+ * 'byte_len'. Returns 0 on success, -EINVAL on failure (byte_len = 0).
+ */
+static int
+cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
+		  struct cpfl_flow_mr_key_action *mr_key_action,
+		  uint8_t *buffer, uint16_t *byte_len)
+{
+	int i, start = 0;
+
+	for (i = 0; i < layout_size; i++) {
+		int index, size, offset;
+		const char *hint;
+		const uint8_t *addr;
+		struct cpfl_flow_mr_key_action *temp;
+		struct cpfl_flow_js_mr_layout *layout;
+		const struct rte_flow_action_vxlan_encap *encap_conf;
+		struct rte_flow_item *definition;
+		int def_length, k;
+
+		layout = &layouts[i];
+		/* index links to the element of the actions array. */
+		index = layout->index;
+		size = layout->size;
+		offset = layout->offset;
+		if (index == -1) {
+			/* Padding entry: reserve 'size' bytes of output.
+			 * (Dropped the original's dead store hint = "dummpy".)
+			 */
+			start += size;
+			continue;
+		}
+		hint = layout->hint;
+		addr = NULL;
+		temp = mr_key_action + index;
+
+		if (temp->type != RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
+			/* TODO: more action types... */
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "Not support this type: %d.", temp->type);
+			return -EINVAL;
+		}
+
+		encap_conf =
+		    (const struct rte_flow_action_vxlan_encap *)temp->encap.action->conf;
+		definition = encap_conf->definition;
+		def_length = cpfl_get_items_length(definition);
+		for (k = 0; k < def_length - 1; k++) {
+			if ((strcmp(hint, "eth") == 0 &&
+			     definition[k].type == RTE_FLOW_ITEM_TYPE_ETH) ||
+			    (strcmp(hint, "ipv4") == 0 &&
+			     definition[k].type == RTE_FLOW_ITEM_TYPE_IPV4) ||
+			    (strcmp(hint, "udp") == 0 &&
+			     definition[k].type == RTE_FLOW_ITEM_TYPE_UDP) ||
+			    (strcmp(hint, "tcp") == 0 &&
+			     definition[k].type == RTE_FLOW_ITEM_TYPE_TCP) ||
+			    (strcmp(hint, "vxlan") == 0 &&
+			     definition[k].type == RTE_FLOW_ITEM_TYPE_VXLAN)) {
+				addr = (const uint8_t *)(definition[k].spec);
+				/* Robustness: the item spec may be NULL. */
+				if (!addr) {
+					*byte_len = 0;
+					PMD_DRV_LOG(ERR, "no spec for hint: %s", hint);
+					return -EINVAL;
+				}
+				/* BUGFIX: the original only rejected start > 255,
+				 * which still let memcpy() run past the end of the
+				 * buffer when start + size > 256.
+				 * NOTE(review): 256 assumed to be the size of
+				 * mr_action->mod.data -- confirm against header.
+				 */
+				if (start + size > 256) {
+					*byte_len = 0;
+					PMD_DRV_LOG(ERR, "byte length is too long%s",
+						    hint);
+					return -EINVAL;
+				}
+				memcpy(buffer + start, addr + offset, size);
+				break;
+			} /* TODO: more hint... */
+		}
+		if (k == def_length - 1) {
+			*byte_len = 0;
+			PMD_DRV_LOG(ERR, "can not find corresponding hint: %s", hint);
+			return -EINVAL;
+		}
+
+		start += size;
+	}
+	*byte_len = start;
+	return 0;
+}
+
+/* Translate a matched JSON modification action into 'mr_action'; only the
+ * "mod" type is supported. A NULL layout means "no data" and succeeds with
+ * byte_len left at 0. Returns 0 on success, -EINVAL otherwise.
+ */
+static int
+cpfl_parse_mr_action(struct cpfl_flow_js_mr_action *action,
+		     struct cpfl_flow_mr_key_action *mr_key_action,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_flow_js_mr_layout *layout;
+	int ret;
+
+	/* mr->action->type */
+	if (action->type != CPFL_JS_MR_ACTION_TYPE_MOD) {
+		PMD_DRV_LOG(ERR, "Not support this type: %d.", action->type);
+		return -EINVAL;
+	}
+
+	/* mr->action->data */
+	mr_action->type = CPFL_JS_MR_ACTION_TYPE_MOD;
+	mr_action->mod.byte_len = 0;
+	mr_action->mod.prof = action->mod.prof;
+	layout = action->mod.layout;
+	if (!layout)
+		return 0;
+
+	/* Serialize the layout into the rule content buffer. */
+	memset(mr_action->mod.data, 0, sizeof(mr_action->mod.data));
+	ret = cpfl_parse_layout(layout, action->mod.layout_size, mr_key_action,
+				mr_action->mod.data, &mr_action->mod.byte_len);
+	return ret < 0 ? -EINVAL : 0;
+}
+
+/* Match the rte_flow actions list against this modification rule's key,
+ * filling 'mr_key_action' on success.
+ */
+static int
+cpfl_check_mod_key(struct cpfl_flow_js_mr *mr, const struct rte_flow_action *actions,
+		   struct cpfl_flow_mr_key_action *mr_key_action)
+{
+	/* mr->key->actions */
+	return cpfl_parse_mr_key_action(mr->key.actions, mr->key.actions_size,
+					actions, mr_key_action);
+}
+
+/* output: struct cpfl_flow_mr_action *mr_action */
+/* Walk the JSON modification rules and translate the action of the first
+ * rule whose key matches the rte_flow actions list.
+ * NOTE(review): mr_key_action has CPFL_MOD_KEY_NUM_MAX (8) slots while
+ * cpfl_check_mod_key() indexes it by the rule's actions_size, which comes
+ * from the JSON file -- confirm the loader bounds actions_size to 8.
+ * Returns 0 on success, -EINVAL when no rule matches.
+ */
+static int
+cpfl_parse_mod_rules(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+		     struct cpfl_flow_mr_action *mr_action)
+{
+#define CPFL_MOD_KEY_NUM_MAX 8
+	int i, size;
+	struct cpfl_flow_mr_key_action mr_key_action[CPFL_MOD_KEY_NUM_MAX] = {0};
+
+	size = parser->mr_size;
+
+	for (i = 0; i < size; i++) {
+		int ret;
+		struct cpfl_flow_js_mr *mr;
+
+		mr = &parser->modifications[i];
+		/* A non-matching key is not an error: try the next rule. */
+		ret = cpfl_check_mod_key(mr, actions, mr_key_action);
+		if (ret < 0)
+			continue;
+		/* mr->action */
+		ret = cpfl_parse_mr_action(&mr->action, mr_key_action, mr_action);
+		if (!ret)
+			return 0;
+	}
+	return -EINVAL;
+}
+
+/* Public entry point for translating an rte_flow actions list via the
+ * modification rules. Absence of modification rules is not an error.
+ */
+int
+cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser, const struct rte_flow_action *actions,
+			struct cpfl_flow_mr_action *mr_action)
+{
+	/* modifications rules */
+	if (!parser->modifications) {
+		PMD_DRV_LOG(INFO, "The modifications is optional.");
+		return 0;
+	}
+	return cpfl_parse_mod_rules(parser, actions, mr_action);
+}
diff --git a/drivers/net/cpfl/cpfl_flow_parser.h b/drivers/net/cpfl/cpfl_flow_parser.h
new file mode 100644
index 0000000000..af33051ce2
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_parser.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#ifndef _CPFL_FLOW_PARSER_H_
+#define _CPFL_FLOW_PARSER_H_
+
+#include <json-c/json.h>
+#include <rte_flow.h>
+
+#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
+
+/* Pattern Rules Storage Begin*/
+/* Action type of a JSON pattern rule; only SEM is currently defined. */
+enum cpfl_flow_pr_action_type {
+	CPFL_JS_PR_ACTION_TYPE_SEM,
+	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
+};
+
+/* Attribute key of a pattern rule: which direction(s) it applies to. */
+struct cpfl_flow_js_pr_key_attr {
+	uint16_t ingress;
+	uint16_t egress;
+};
+
+/* One protocol field referenced by a pattern-rule key; the mask is given
+ * either as a string or as a 32-bit value.
+ */
+struct cpfl_flow_js_pr_key_proto_field {
+	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	union {
+		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
+		uint32_t mask_32b;
+	};
+};
+
+/* Protocol part of a pattern-rule key: an rte_flow item type plus the
+ * list of fields/masks that must match.
+ */
+struct cpfl_flow_js_pr_key_proto {
+	enum rte_flow_item_type type;
+	struct cpfl_flow_js_pr_key_proto_field *fields;
+	int fields_size;
+};
+
+/* Source of a field-vector entry: a protocol header field or an
+ * immediate value taken from the JSON file.
+ */
+enum cpfl_flow_js_fv_type {
+	CPFL_FV_TYPE_PROTOCOL,
+	CPFL_FV_TYPE_IMMEDIATE,
+	CPFL_FV_TYPE_UNKNOWN = -1,
+
+};
+
+/* One field-vector (FV) entry of a SEM pattern-rule action. */
+struct cpfl_flow_js_fv {
+	uint16_t offset;	/* destination offset in the FV key */
+	enum cpfl_flow_js_fv_type type;
+	union {
+		uint16_t immediate;	/* immediate 16-bit value */
+		struct {
+			uint16_t layer;	/* header layer index in the pattern */
+			enum rte_flow_item_type header;
+			uint16_t offset;	/* offset within that header */
+			uint16_t mask;
+		} proto;
+	};
+};
+
+#define CPFL_MAX_SEM_FV_KEY_SIZE 64
+/* SEM pattern-rule action as described in the JSON file. */
+struct cpfl_flow_js_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	struct cpfl_flow_js_fv *fv;
+	int fv_size;
+};
+
+struct cpfl_flow_js_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_js_pr_action_sem sem;
+	};
+};
+
+/* One JSON pattern rule: a key (protocols + attributes) and its actions. */
+struct cpfl_flow_js_pr {
+	struct {
+		struct cpfl_flow_js_pr_key_proto *protocols;
+		uint16_t proto_size;
+		struct cpfl_flow_js_pr_key_attr *attributes;
+		uint16_t attr_size;
+	} key;
+	struct cpfl_flow_js_pr_action *actions;
+	uint16_t actions_size;
+};
+
+/* Pattern Rules Storage End */
+
+/* Modification Rules Storage Begin */
+#define CPFL_FLOW_JS_PROTO_SIZE 16
+/* VXLAN_ENCAP key: the protocol item types making up the encap header. */
+struct cpfl_flow_js_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	int proto_size;
+};
+
+/* One key action of a modification rule. */
+struct cpfl_flow_js_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_js_mr_key_action_vxlan_encap encap;
+	};
+};
+
+/* Key of a modification rule: the action list that must match. */
+struct cpfl_flow_js_mr_key {
+	struct cpfl_flow_js_mr_key_action *actions;
+	int actions_size;
+};
+
+/* One layout entry describing how MOD content data is arranged; consumed
+ * by cpfl_parse_layout() to fill cpfl_flow_mr_action_mod.data.
+ */
+struct cpfl_flow_js_mr_layout {
+	int index;
+	char hint[CPFL_FLOW_JSON_STR_SIZE_MAX];
+	uint16_t offset;
+	uint16_t size;
+};
+
+/* MOD action of a modification rule: profile ID plus optional layout. */
+struct cpfl_flow_js_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;
+	struct cpfl_flow_js_mr_layout *layout;
+	int layout_size;
+};
+
+enum cpfl_flow_mr_action_type {
+	CPFL_JS_MR_ACTION_TYPE_MOD,
+};
+
+struct cpfl_flow_js_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_js_mr_action_mod mod;
+	};
+};
+
+/* One JSON modification rule: a key to match plus the action to apply. */
+struct cpfl_flow_js_mr {
+	struct cpfl_flow_js_mr_key key;
+	struct cpfl_flow_js_mr_action action;
+};
+
+/* Modification Rules Storage End */
+
+/* In-memory representation of the parsed flow configuration JSON file. */
+struct cpfl_flow_js_parser {
+	struct cpfl_flow_js_pr *patterns;
+	int pr_size;
+	struct cpfl_flow_js_mr *modifications;
+	int mr_size;
+};
+
+/* Pattern Rules Begin */
+/* Resolved SEM action: profile/sub-profile plus the packed FV key bytes. */
+struct cpfl_flow_pr_action_sem {
+	uint16_t prof;
+	uint16_t subprof;
+	uint16_t keysize;
+	uint8_t cpfl_flow_pr_fv[CPFL_MAX_SEM_FV_KEY_SIZE];
+};
+
+/* Resolved pattern-rule action produced by cpfl_flow_parse_items(). */
+struct cpfl_flow_pr_action {
+	enum cpfl_flow_pr_action_type type;
+	union {
+		struct cpfl_flow_pr_action_sem sem;
+	};
+};
+
+/* Pattern Rules End */
+
+/* Modification Rules Begin */
+/* VXLAN_ENCAP key resolved against the user's rte_flow actions; keeps a
+ * pointer to the matched rte_flow action.
+ */
+struct cpfl_flow_mr_key_action_vxlan_encap {
+	enum rte_flow_item_type protocols[CPFL_FLOW_JS_PROTO_SIZE];
+	uint16_t proto_size;
+	const struct rte_flow_action *action;
+};
+
+struct cpfl_flow_mr_key_action {
+	enum rte_flow_action_type type;
+	union {
+		struct cpfl_flow_mr_key_action_vxlan_encap encap;
+	};
+};
+
+/* Resolved MOD action: profile ID plus the raw content data bytes. */
+struct cpfl_flow_mr_action_mod {
+	uint16_t prof;
+	uint16_t byte_len;	/* number of valid bytes in data[] */
+	uint8_t data[256];
+};
+
+/* Resolved modification action produced by cpfl_flow_parse_actions(). */
+struct cpfl_flow_mr_action {
+	enum cpfl_flow_mr_action_type type;
+	union {
+		struct cpfl_flow_mr_action_mod mod;
+	};
+};
+
+/* Modification Rules End */
+
+struct cpfl_pipeline_stage {
+	int stage;
+	int recircle;
+};
+
+/* Build a parser from the flow configuration JSON file at @filename. */
+int cpfl_parser_create(struct cpfl_flow_js_parser **parser, const char *filename);
+/* Free a parser created by cpfl_parser_create(). */
+int cpfl_parser_destroy(struct cpfl_flow_js_parser *parser);
+/* Translate rte_flow pattern items into a resolved pattern-rule action. */
+int cpfl_flow_parse_items(struct cpfl_flow_js_parser *parser,
+			  const struct rte_flow_item *items,
+			  const struct rte_flow_attr *attr,
+			  struct cpfl_flow_pr_action *pr_action);
+/* Translate rte_flow actions into a resolved modification-rule action. */
+int cpfl_flow_parse_actions(struct cpfl_flow_js_parser *parser,
+			    const struct rte_flow_action *actions,
+			    struct cpfl_flow_mr_action *mr_action);
+#endif
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 0be25512c3..7b8d043011 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -41,6 +41,9 @@ endif
 
 js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
+    sources += files(
+        'cpfl_flow_parser.c',
+    )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
 endif
\ No newline at end of file
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 3/4] net/cpfl: introduce CPF common library
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
                     ` (2 preceding siblings ...)
  2023-08-11 10:00   ` [PATCH v2 2/4] net/cpfl: add flow json parser Wenjing Qiao
@ 2023-08-11 10:00   ` Wenjing Qiao
  2023-08-11 10:00   ` [PATCH v2 4/4] net/cpfl: setup ctrl path Wenjing Qiao
  4 siblings, 0 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11 10:00 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao

Add common library support for CPFL rte_flow to
create/delete rules.

Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
Depends-on: series-29139 ("net/cpfl: support port representor")
---
 drivers/net/cpfl/cpfl_actions.h  | 858 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_controlq.c | 379 ++++++++++++++
 drivers/net/cpfl/cpfl_controlq.h |  51 ++
 drivers/net/cpfl/cpfl_rules.c    | 126 +++++
 drivers/net/cpfl/cpfl_rules.h    | 306 +++++++++++
 drivers/net/cpfl/meson.build     |   2 +
 6 files changed, 1722 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_actions.h
 create mode 100644 drivers/net/cpfl/cpfl_controlq.c
 create mode 100644 drivers/net/cpfl/cpfl_controlq.h
 create mode 100644 drivers/net/cpfl/cpfl_rules.c
 create mode 100644 drivers/net/cpfl/cpfl_rules.h

diff --git a/drivers/net/cpfl/cpfl_actions.h b/drivers/net/cpfl/cpfl_actions.h
new file mode 100644
index 0000000000..7b82119e39
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_actions.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_ACTIONS_H_
+#define _CPFL_ACTIONS_H_
+
+#include "base/idpf_osdep.h"
+
+#pragma pack(1)
+
+/* One 32-bit encoded classification action.  Each struct below is a
+ * bit-field view of the same 32-bit word for a different action class.
+ */
+union cpfl_action_set {
+	uint32_t data;
+
+	/* 24-bit action, group A (4-bit index) */
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 4;
+		uint32_t tag : 1;
+		uint32_t prec : 3;
+	} set_24b_a;
+
+	/* 24-bit action, group B (3-bit index) */
+	struct {
+		uint32_t val : 24;
+		uint32_t idx : 3;
+		uint32_t tag : 2;
+		uint32_t prec : 3;
+	} set_24b_b;
+
+	/* 16-bit action */
+	struct {
+		uint32_t val : 16;
+		uint32_t idx : 4;
+		uint32_t unused : 6;
+		uint32_t tag : 3;
+		uint32_t prec : 3;
+	} set_16b;
+
+	/* 8-bit action: two independent value/index pairs */
+	struct {
+		uint32_t val_a : 8;
+		uint32_t val_b : 8;
+		uint32_t idx_a : 4;
+		uint32_t idx_b : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_8b;
+
+	/* 1-bit action: value bits gated by per-bit enables */
+	struct {
+		uint32_t val : 10;
+		uint32_t ena : 10;
+		uint32_t idx : 4;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} set_1b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} nop;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} chained_24b;
+
+	struct {
+		uint32_t val : 24;
+		uint32_t tag : 5;
+		uint32_t prec : 3;
+	} aux_flags;
+};
+
+/* A pair of action-set words, for chained actions that consume two
+ * consecutive action sets.
+ */
+struct cpfl_action_set_ext {
+#define CPFL_ACTION_SET_EXT_CNT 2
+	union cpfl_action_set acts[CPFL_ACTION_SET_EXT_CNT];
+};
+
+#pragma pack()
+
+/**
+ * cpfl_act_nop - Encode a NOP action (the all-zero action word)
+ */
+static inline union cpfl_action_set
+cpfl_act_nop(void)
+{
+	union cpfl_action_set nop = { .data = 0 };
+
+	return nop;
+}
+
+/**
+ * cpfl_is_nop_action - Indicate if an action set is a NOP
+ */
+static inline bool
+cpfl_is_nop_action(union cpfl_action_set *act)
+{
+	union cpfl_action_set nop = cpfl_act_nop();
+
+	return act->data == nop.data;
+}
+
+#define CPFL_MAKE_MASK32(b, s)	((((uint32_t)1 << (b)) - 1) << (s))
+
+#define CPFL_ACT_PREC_MAX	7
+#define CPFL_ACT_PREC_S		29
+#define CPFL_ACT_PREC_M		CPFL_MAKE_MASK32(3, CPFL_ACT_PREC_S)
+#define CPFL_ACT_PREC_SET(p)	\
+	(((uint32_t)(p) << CPFL_ACT_PREC_S) & CPFL_ACT_PREC_M)
+#define CPFL_ACT_PREC_CHECK(p)	((p) > 0 && (p) <= CPFL_ACT_PREC_MAX)
+
+#define CPFL_METADATA_ID_CNT		32	/* Max number of metadata IDs */
+#define CPFL_METADATA_STRUCT_MAX_SZ	128	/* Max metadata size per ID */
+
+/*******************************************************************************
+ * 1-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_1B_OP_S	24
+#define CPFL_ACT_1B_OP_M	CPFL_MAKE_MASK32(5, CPFL_ACT_1B_OP_S)
+#define CPFL_ACT_1B_OP		((uint32_t)(0x01) << CPFL_ACT_1B_OP_S)
+
+#define CPFL_ACT_1B_VAL_S	0
+#define CPFL_ACT_1B_VAL_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_VAL_S)
+#define CPFL_ACT_1B_EN_S	10
+#define CPFL_ACT_1B_EN_M	CPFL_MAKE_MASK32(10, CPFL_ACT_1B_EN_S)
+#define CPFL_ACT_1B_INDEX_S	20
+#define CPFL_ACT_1B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_1B_INDEX_S)
+
+/* 1-bit actions currently uses only INDEX of 0 */
+#define CPFL_ACT_MAKE_1B(prec, en, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_1B_OP | \
+	 ((((uint32_t)0) << CPFL_ACT_1B_INDEX_S) & CPFL_ACT_1B_INDEX_M) | \
+	 (((uint32_t)(en) << CPFL_ACT_1B_EN_S) & CPFL_ACT_1B_EN_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_1B_VAL_S) & CPFL_ACT_1B_VAL_M))
+
+/* Enable-bit positions of the individual 1-bit actions; passed as the
+ * "en" mask of CPFL_ACT_MAKE_1B (see cpfl_act_drop()).
+ */
+enum cpfl_act_1b_op {
+	CPFL_ACT_1B_OP_DROP		= 0x01,
+	CPFL_ACT_1B_OP_HDR_SPLIT	= 0x02,
+	CPFL_ACT_1B_OP_DIR_CHANGE	= 0x04,
+	CPFL_ACT_1B_OP_DEFER_DROP	= 0x08,
+	CPFL_ACT_1B_OP_ORIG_MIR_MD	= 0x80
+};
+
+#define CPFL_ACT_1B_COMMIT_MODE_S	4
+#define CPFL_ACT_1B_COMMIT_MODE_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_1B_COMMIT_MODE_S)
+
+/**
+ * cpfl_act_commit_mode - action commit mode for certain action classes
+ */
+enum cpfl_act_commit_mode {
+	/* Action processing for the initial classification pass */
+	CPFL_ACT_COMMIT_ALL		= 0, /* Commit all actions */
+	CPFL_ACT_COMMIT_PRE_MOD		= 1, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_NONE		= 2, /* Commit no action */
+	/* Action processing for deferred actions in a recirculation pass */
+	CPFL_ACT_COMMIT_RECIR_ALL	= 4, /* Commit all actions */
+	CPFL_ACT_COMMIT_RECIR_PRE_MOD	= 5, /* Commit only pre-modify actions*/
+	CPFL_ACT_COMMIT_RECIR_NONE	= 6  /* Commit no action */
+};
+
+/*******************************************************************************
+ * 8-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_8B_S	24
+#define CPFL_ACT_OP_8B_M	CPFL_MAKE_MASK32(5, CPFL_ACT_OP_8B_S)
+#define CPFL_ACT_OP_8B		((uint32_t)(0x02) << CPFL_ACT_OP_8B_S)
+
+#define CPFL_ACT_8B_A_VAL_S	0
+#define CPFL_ACT_8B_A_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_A_VAL_S)
+#define CPFL_ACT_8B_A_INDEX_S	16
+#define CPFL_ACT_8B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_A_INDEX_S)
+
+#define CPFL_ACT_8B_B_VAL_S	8
+#define CPFL_ACT_8B_B_VAL_M	CPFL_MAKE_MASK32(8, CPFL_ACT_8B_B_VAL_S)
+#define CPFL_ACT_8B_B_INDEX_S	20
+#define CPFL_ACT_8B_B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_8B_B_INDEX_S)
+
+/* Unless combining two 8-bit actions into an action set, both A and B fields
+ * must be the same.
+ */
+#define CPFL_ACT_MAKE_8B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_8B | \
+	 (((idx) << CPFL_ACT_8B_A_INDEX_S) & CPFL_ACT_8B_A_INDEX_M) | \
+	 (((idx) << CPFL_ACT_8B_B_INDEX_S) & CPFL_ACT_8B_B_INDEX_M) | \
+	 (((val) << CPFL_ACT_8B_A_VAL_S) & CPFL_ACT_8B_A_VAL_M) | \
+	 (((val) << CPFL_ACT_8B_B_VAL_S) & CPFL_ACT_8B_B_VAL_M))
+
+/* 8-Bit Action Indices */
+#define CPFL_ACT_8B_INDEX_MOD_META		9
+
+/* 8-Bit Action Miscellaneous */
+#define CPFL_ACT_8B_MOD_META_PROF_CNT		16
+#define CPFL_ACT_8B_MOD_META_VALID		0x80
+
+/*******************************************************************************
+ * 16-Bit Actions
+ ******************************************************************************/
+#define CPFL_ACT_OP_16B_S	26
+#define CPFL_ACT_OP_16B_M	CPFL_MAKE_MASK32(3, CPFL_ACT_OP_16B_S)
+#define CPFL_ACT_OP_16B		((uint32_t)0x1 << CPFL_ACT_OP_16B_S)
+
+#define CPFL_ACT_16B_INDEX_S	16
+#define CPFL_ACT_16B_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_16B_INDEX_S)
+#define CPFL_ACT_16B_VAL_S	0
+#define CPFL_ACT_16B_VAL_M	CPFL_MAKE_MASK32(16, CPFL_ACT_16B_VAL_S)
+
+#define CPFL_ACT_MAKE_16B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_16B | \
+	 (((uint32_t)(idx) << CPFL_ACT_16B_INDEX_S) & CPFL_ACT_16B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_VAL_S) & CPFL_ACT_16B_VAL_M))
+
+/* 16-Bit Action Indices */
+#define CPFL_ACT_16B_INDEX_COUNT_SET		0
+#define CPFL_ACT_16B_INDEX_SET_MCAST_IDX	1
+#define CPFL_ACT_16B_INDEX_SET_VSI		2
+#define CPFL_ACT_16B_INDEX_DEL_MD		4
+#define CPFL_ACT_16B_INDEX_MOD_VSI_LIST		5
+
+/* 16-Bit Action Miscellaneous */
+#define CPFL_ACT_16B_COUNT_SET_CNT		2048 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_SET_VSI_SLOTS		2
+#define CPFL_ACT_16B_FWD_VSI_CNT		1032 /* TODO: Value from NSL */
+#define CPFL_ACT_16B_FWD_VSI_LIST_CNT		256
+#define CPFL_ACT_16B_MOD_VSI_LIST_CNT		1024
+#define CPFL_ACT_16B_FWD_PORT_CNT		4
+#define CPFL_ACT_16B_DEL_MD_MID_CNT		32
+#define CPFL_ACT_16B_MOD_VSI_LIST_SLOTS		4
+
+/* 16-Bit SET_MCAST_IDX Action */
+#define CPFL_ACT_16B_SET_MCAST_VALID	((uint32_t)1 << 15)
+
+/* 16-Bit SET_VSI Action Variants */
+#define CPFL_ACT_16B_SET_VSI_VAL_S		0
+#define CPFL_ACT_16B_SET_VSI_VAL_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_16B_SET_VSI_VAL_S)
+#define CPFL_ACT_16B_SET_VSI_PE_S		11
+#define CPFL_ACT_16B_SET_VSI_PE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_PE_S)
+#define CPFL_ACT_16B_SET_VSI_TYPE_S		14
+#define CPFL_ACT_16B_SET_VSI_TYPE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_SET_VSI_TYPE_S)
+
+/* 16-Bit DEL_MD Action */
+#define CPFL_ACT_16B_DEL_MD_0_S		0
+#define CPFL_ACT_16B_DEL_MD_1_S		5
+
+/* 16-Bit MOD_VSI_LIST Actions */
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_S	0
+#define CPFL_ACT_16B_MOD_VSI_LIST_ID_M	\
+	CPFL_MAKE_MASK32(10, CPFL_ACT_16B_MOD_VSI_LIST_ID_S)
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_S	14
+#define CPFL_ACT_16B_MOD_VSI_LIST_OP_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_16B_MOD_VSI_LIST_OP_S)
+#define CPFL_MAKE_16B_MOD_VSI_LIST(op, id) \
+	((((uint32_t)(op) << CPFL_ACT_16B_MOD_VSI_LIST_OP_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_OP_M) | \
+	 (((uint32_t)(id) << CPFL_ACT_16B_MOD_VSI_LIST_ID_S) & \
+		CPFL_ACT_16B_MOD_VSI_LIST_ID_M))
+
+#define CPFL_ACT_16B_MAKE_SET_VSI(type, pe, val) \
+	((((uint32_t)(type) << CPFL_ACT_16B_SET_VSI_TYPE_S) & \
+		CPFL_ACT_16B_SET_VSI_TYPE_M) | \
+	 (((uint32_t)(pe) << CPFL_ACT_16B_SET_VSI_PE_S) & \
+		CPFL_ACT_16B_SET_VSI_PE_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_16B_SET_VSI_VAL_S) & \
+		CPFL_ACT_16B_SET_VSI_VAL_M))
+
+/* Destination protocol engine of forwarding actions (SET_VSI/SET_Q). */
+enum cpfl_prot_eng {
+	CPFL_PE_LAN = 0,
+	CPFL_PE_RDMA,
+	CPFL_PE_CRT
+};
+
+/* Forwarding target type encoded in the SET_VSI action "type" field. */
+enum cpfl_act_fwd_type {
+	CPFL_ACT_FWD_VSI,
+	CPFL_ACT_FWD_VSI_LIST,
+	CPFL_ACT_FWD_PORT
+};
+
+/*******************************************************************************
+ * 24-Bit Actions
+ ******************************************************************************/
+/* Group A */
+#define CPFL_ACT_OP_24B_A_S	28
+#define CPFL_ACT_OP_24B_A_M	CPFL_MAKE_MASK32(1, CPFL_ACT_OP_24B_A_S)
+#define CPFL_ACT_24B_A_INDEX_S	24
+#define CPFL_ACT_24B_A_INDEX_M	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_A_INDEX_S)
+#define CPFL_ACT_24B_A_VAL_S	0
+#define CPFL_ACT_24B_A_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_A_VAL_S)
+
+#define CPFL_ACT_OP_24B_A	((uint32_t)1 << CPFL_ACT_OP_24B_A_S)
+
+#define CPFL_ACT_MAKE_24B_A(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_A | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_A_INDEX_S) & CPFL_ACT_24B_A_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_A_VAL_S) & CPFL_ACT_24B_A_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_MOD_ADDR	0
+#define CPFL_ACT_24B_INDEX_MIRROR_FIRST	1
+#define CPFL_ACT_24B_INDEX_COUNT	2
+#define CPFL_ACT_24B_INDEX_SET_Q	8
+#define CPFL_ACT_24B_INDEX_MOD_PROFILE	9
+#define CPFL_ACT_24B_INDEX_METER	10
+
+#define CPFL_ACT_24B_COUNT_SLOTS	6
+#define CPFL_ACT_24B_METER_SLOTS	6
+
+#define CPFL_ACT_24B_MOD_ADDR_CNT	(16 * 1024 * 1024)
+#define CPFL_ACT_24B_COUNT_ID_CNT	((uint32_t)1 << 24)
+#define CPFL_ACT_24B_SET_Q_CNT		(12 * 1024)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_BITS	3
+
+/* 24-Bit SET_Q Action */
+#define CPFL_ACT_24B_SET_Q_Q_S		0
+#define CPFL_ACT_24B_SET_Q_Q_M		\
+	CPFL_MAKE_MASK32(14, CPFL_ACT_24B_SET_Q_Q_S)
+#define CPFL_ACT_24B_SET_Q_Q_RGN_S	14
+#define CPFL_ACT_24B_SET_Q_Q_RGN_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_Q_Q_RGN_S)
+#define CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS	CPFL_MAKE_MASK32(1, 17)
+#define CPFL_ACT_24B_SET_Q_DST_PE_S	21
+#define CPFL_ACT_24B_SET_Q_DST_PE_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_SET_Q_DST_PE_S)
+#define CPFL_ACT_24B_SET_Q_VALID	CPFL_MAKE_MASK32(1, 23)
+
+/* 24-Bit MOD_PROFILE Action */
+enum cpfl_act_mod_profile_hint {
+	CPFL_ACT_MOD_PROFILE_NO_ADDR = 0, /* No associated MOD_ADDR action */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_128B, /* Prefetch 128B using MOD_ADDR */
+	CPFL_ACT_MOD_PROFILE_PREFETCH_256B, /* Prefetch 256B using MOD_ADDR */
+};
+
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_S		0
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_M		\
+	CPFL_MAKE_MASK32(11, CPFL_ACT_24B_MOD_PROFILE_PROF_S)
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S	12
+#define CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M	\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S)
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_S		14
+#define CPFL_ACT_24B_MOD_PROFILE_HINT_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_MOD_PROFILE_HINT_S)
+#define CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS		((uint32_t)1 << 16)
+#define CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND	((uint32_t)1 << 17)
+#define CPFL_ACT_24B_MOD_PROFILE_VALID			((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES	4
+#define CPFL_ACT_24B_MOD_PROFILE_PROF_CNT		2048
+
+/* 24-Bit METER Actions */
+#define CPFL_ACT_24B_METER_INDEX_S	0
+#define CPFL_ACT_24B_METER_INDEX_M	\
+	CPFL_MAKE_MASK32(20, CPFL_ACT_24B_METER_INDEX_S)
+#define CPFL_ACT_24B_METER_BANK_S	20
+#define CPFL_ACT_24B_METER_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_METER_BANK_S)
+#define CPFL_ACT_24B_METER_VALID	((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_METER_BANK_CNT	6
+#define CPFL_ACT_24B_METER_INDEX_CNT	((uint32_t)1 << 20)
+
+/* Group B */
+#define CPFL_ACT_OP_24B_B_S	27
+#define CPFL_ACT_OP_24B_B_M	CPFL_MAKE_MASK32(2, CPFL_ACT_OP_24B_B_S)
+#define CPFL_ACT_24B_B_INDEX_S	24
+#define CPFL_ACT_24B_B_INDEX_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_B_INDEX_S)
+#define CPFL_ACT_24B_B_VAL_S	0
+#define CPFL_ACT_24B_B_VAL_M	CPFL_MAKE_MASK32(24, CPFL_ACT_24B_B_VAL_S)
+
+#define CPFL_ACT_OP_24B_B	((uint32_t)1 << CPFL_ACT_OP_24B_B_S)
+
+#define CPFL_ACT_MAKE_24B_B(prec, idx, val) \
+	((CPFL_ACT_PREC_SET(prec)) | CPFL_ACT_OP_24B_B | \
+	 (((uint32_t)(idx) << CPFL_ACT_24B_B_INDEX_S) & CPFL_ACT_24B_B_INDEX_M) | \
+	 (((uint32_t)(val) << CPFL_ACT_24B_B_VAL_S) & CPFL_ACT_24B_B_VAL_M))
+
+#define CPFL_ACT_24B_INDEX_SET_MD	0
+#define CPFL_ACT_24B_INDEX_RANGE_CHECK	6
+#define CPFL_ACT_24B_SET_MD_SLOTS	6
+
+/* Set/Add/Delete Metadata Actions - SET_MD[0-5], DEL_MD */
+/* 8-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD8_VAL_S	0
+#define CPFL_ACT_24B_SET_MD8_VAL_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_VAL_S)
+#define CPFL_ACT_24B_SET_MD8_MASK_S	8
+#define CPFL_ACT_24B_SET_MD8_MASK_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD8_MASK_S)
+#define CPFL_ACT_24B_SET_MD8_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD8_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD8_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD8_TYPE_ID_S)
+/* 16-Bit SET_MD */
+#define CPFL_ACT_24B_SET_MD16_VAL_S	0
+#define CPFL_ACT_24B_SET_MD16_VAL_M	\
+	CPFL_MAKE_MASK32(16, CPFL_ACT_24B_SET_MD16_VAL_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_L_S	16 /* For chained action */
+#define CPFL_ACT_24B_SET_MD16_MASK_L_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD16_MASK_L_S)
+#define CPFL_ACT_24B_SET_MD16_MASK_H_SR	8
+#define CPFL_ACT_24B_SET_MD16_MASK_H_M	0xff
+#define CPFL_ACT_24B_SET_MD16_OFFSET_S	16
+#define CPFL_ACT_24B_SET_MD16_OFFSET_M	\
+	CPFL_MAKE_MASK32(4, CPFL_ACT_24B_SET_MD16_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_S	20
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_SET_MD16_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD16		((uint32_t)1 << 23)
+
+#define CPFL_ACT_24B_SET_MD32_VAL_L_M	CPFL_MAKE_MASK32(24, 0)
+
+#define CPFL_ACT_24B_SET_MD8_OFFSET_MAX		15
+#define CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX	7
+#define CPFL_ACT_24B_SET_MD16_OFFSET_MAX	15
+#define CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX	7
+
+/* RANGE_CHECK Action */
+enum cpfl_rule_act_rc_mode {
+	CPFL_RULE_ACT_RC_1_RANGE = 0,
+	CPFL_RULE_ACT_RC_2_RANGES = 1,
+	CPFL_RULE_ACT_RC_4_RANGES = 2,
+	CPFL_RULE_ACT_RC_8_RANGES = 3
+};
+
+#define CPFL_ACT_24B_RC_TBL_IDX_S	0
+#define CPFL_ACT_24B_RC_TBL_IDX_M	\
+	CPFL_MAKE_MASK32(13, CPFL_ACT_24B_RC_TBL_IDX_S)
+#define CPFL_ACT_24B_RC_START_BANK_S	13
+#define CPFL_ACT_24B_RC_START_BANK_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_24B_RC_START_BANK_S)
+#define CPFL_ACT_24B_RC_MODE_S		16
+#define CPFL_ACT_24B_RC_MODE_M		\
+	CPFL_MAKE_MASK32(2, CPFL_ACT_24B_RC_MODE_S)
+#define CPFL_ACT_24B_RC_XTRACT_PROF_S	18
+#define CPFL_ACT_24B_RC_XTRACT_PROF_M	\
+	CPFL_MAKE_MASK32(6, CPFL_ACT_24B_RC_XTRACT_PROF_S)
+
+#define CPFL_ACT_24B_RC_TBL_INDEX_CNT	(8 * 1024)
+#define CPFL_ACT_24B_RC_BANK_CNT	8
+#define CPFL_ACT_24B_RC_XTRACT_PROF_CNT	64
+
+/*******************************************************************************
+ * 24-Bit Chained Auxiliary Actions
+ ******************************************************************************/
+
+/* TODO: HAS is being updated.  Revise the order of chained and base action
+ * when the HAS has it finalized.
+ */
+/**
+ * 24-Bit Chained SET_MD Actions
+ *
+ * Chained SET_MD actions consume two consecutive action sets.  The first one is
+ * the chained AUX action set.  The second one is the base/parent action set.
+ * Chained SET_MD actions can add and/or update metadata structure with IDs from
+ * 0 to 31 while the non-chained SET_MD variants can only update existing meta-
+ * data IDs below 16.
+ */
+
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_S	8
+#define CPFL_ACT_24B_SET_MD_AUX_OFFSET_M	\
+	CPFL_MAKE_MASK32(7, CPFL_ACT_24B_SET_MD_AUX_OFFSET_S)
+#define CPFL_ACT_24B_SET_MD_AUX_ADD		((uint32_t)1 << 15)
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S	16
+#define CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M	\
+	CPFL_MAKE_MASK32(5, CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S)
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_S		0
+#define CPFL_ACT_24B_SET_MD_AUX_DATA_M		\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S	0
+#define CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_M	\
+	CPFL_MAKE_MASK32(8, CPFL_ACT_24B_SET_MD_AUX_16B_MASK_H_S)
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR	24 /* Upper 8 bits of MD32 */
+#define CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_M	0xff
+
+#define CPFL_ACT_TYPE_CHAIN_DATA_S	29
+#define CPFL_ACT_TYPE_CHAIN_DATA_M	\
+	CPFL_MAKE_MASK32(3, CPFL_ACT_TYPE_CHAIN_DATA_S)
+#define CPFL_ACT_TYPE_CHAIN_DATA	((uint32_t)1 << CPFL_ACT_TYPE_CHAIN_DATA_S)
+
+#define CPFL_ACT_24B_SET_MD_OP_S	21
+#define CPFL_ACT_24B_SET_MD_OP_8B	((uint32_t)0 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_16B	((uint32_t)1 << CPFL_ACT_24B_SET_MD_OP_S)
+#define CPFL_ACT_24B_SET_MD_OP_32B	((uint32_t)2 << CPFL_ACT_24B_SET_MD_OP_S)
+
+#define CPFL_ACT_24B_SET_MD_AUX_MAKE(op, mid, off, data) \
+	(CPFL_ACT_TYPE_CHAIN_DATA | (op) | \
+	 (((uint32_t)(mid) << CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_TYPE_ID_M) | \
+	 (((uint32_t)(off) << CPFL_ACT_24B_SET_MD_AUX_OFFSET_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_OFFSET_M) | \
+	 (((uint32_t)(data) << CPFL_ACT_24B_SET_MD_AUX_DATA_S) & \
+		CPFL_ACT_24B_SET_MD_AUX_DATA_M))
+
+/*******************************************************************************
+ * 1-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_drop - Encode a 1-bit DROP action
+ *
+ * The DROP action has precedence over the DEFER_DROP action.
+ * Effect of ACT_COMMIT action on the DROP action:
+ *  - CPFL_ACT_COMMIT_ALL: Packet is dropped.
+ *  - CPFL_ACT_COMMIT_PRE_MOD or CPFL_ACT_COMMIT_NONE: Packet is not dropped.
+ *  - CPFL_ACT_COMMIT_RECIR_ALL: Packet is dropped.  Recirculation is canceled.
+ *  - CPFL_ACT_COMMIT_RECIR_PRE_MOD or CPFL_ACT_COMMIT_RECIR_NONE: Packet is not
+ *    dropped. Recirculation continues.
+ *
+ * Once a DROP action is set, it cannot be reverted during the classification
+ * process of a network packet.
+ *
+ * Returns a NOP action when @prec is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_drop(uint8_t prec)
+{
+	union cpfl_action_set a;
+
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	/* DROP is a 1-bit action: enable bit OP_DROP with value 1. */
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_OP_DROP, 1);
+	return a;
+}
+
+/**
+ * cpfl_act_set_commit_mode - Encode a 1-bit ACT_COMMIT action
+ * An ACT_COMMIT action specifies if and when all actions are committed.
+ *
+ * Returns a NOP action when @prec is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_commit_mode(uint8_t prec, enum cpfl_act_commit_mode mode)
+{
+	union cpfl_action_set a;
+
+	/* The 3-bit commit mode occupies COMMIT_MODE_S..+2 of the 1-bit
+	 * action value field; COMMIT_MODE_M doubles as the enable mask.
+	 */
+	if (!CPFL_ACT_PREC_CHECK(prec))
+		return cpfl_act_nop();
+	a.data = CPFL_ACT_MAKE_1B(prec, CPFL_ACT_1B_COMMIT_MODE_M,
+				  (uint32_t)mode << CPFL_ACT_1B_COMMIT_MODE_S);
+	return a;
+}
+
+/*******************************************************************************
+ * 8-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_meta - Encode an 8-bit MOD_META action
+ *
+ * Returns a NOP action when @prec or @prof is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_meta(uint8_t prec, uint8_t prof)
+{
+	union cpfl_action_set act;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || prof >= CPFL_ACT_8B_MOD_META_PROF_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_8B_MOD_META_VALID | prof;
+	act.data = CPFL_ACT_MAKE_8B(prec, CPFL_ACT_8B_INDEX_MOD_META, val);
+
+	return act;
+}
+
+/*******************************************************************************
+ * 16-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_fwd_vsi - Encode a 16-bit SET_VSI action (forward to a VSI)
+ *
+ * This encodes the "Forward to Single VSI" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ *
+ * Returns a NOP action when @prec, @slot or @vsi is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_vsi(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint16_t vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    vsi >= CPFL_ACT_16B_FWD_VSI_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_VSI, pe, vsi);
+	a.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				   val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_fwd_port - Encode a 16-bit SET_VSI action (forward to a port)
+ *
+ * This encodes the "Forward to a port" variant of SET_VSI action.
+ * SEM can use both SET_VSI action slots.  The other classification blocks can
+ * only use slot 0.
+ */
+static inline union cpfl_action_set
+cpfl_act_fwd_port(uint8_t slot, uint8_t prec, enum cpfl_prot_eng pe, uint8_t port)
+{
+	union cpfl_action_set act;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_16B_SET_VSI_SLOTS ||
+	    port >= CPFL_ACT_16B_FWD_PORT_CNT)
+		return cpfl_act_nop();
+
+	act.data = CPFL_ACT_MAKE_16B(prec, CPFL_ACT_16B_INDEX_SET_VSI + slot,
+				     CPFL_ACT_16B_MAKE_SET_VSI(CPFL_ACT_FWD_PORT,
+							       pe, port));
+
+	return act;
+}
+
+/*******************************************************************************
+ * 24-Bit Action Factory
+ ******************************************************************************/
+
+/**
+ * cpfl_act_mod_addr - Encode a 24-bit MOD_ADDR action
+ *
+ * This MOD_ADDR specifies the index of the MOD content entry an accompanying
+ * MOD_PROFILE action uses.  Some MOD_PROFILE actions may need to use extra
+ * information from a Modify content entry, and requires an accompanying
+ * MOD_ADDR action.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_addr(uint8_t prec, uint32_t mod_addr)
+{
+	if (!CPFL_ACT_PREC_CHECK(prec) || mod_addr >= CPFL_ACT_24B_MOD_ADDR_CNT)
+		return cpfl_act_nop();
+
+	return (union cpfl_action_set) {
+		.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_ADDR,
+					    mod_addr),
+	};
+}
+
+/**
+ * cpfl_act_set_hash_queue - Encode a 24-bit SET_Q action (one queue variant)
+ *
+ * This action is a "Forward to a single queue" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ *
+ * Returns a NOP action when @prec or @q is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q,
+			bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q >= CPFL_ACT_24B_SET_Q_CNT)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_hash_queue_region - Encode a 24-bit SET_Q action (queue region)
+ *
+ * This action is a "Forward to a queue region" variant of the SET_Q action.
+ *
+ * SEM performs Implicit VSI for SET_Q action when "no_implicit_vsi" is false.
+ * WCM and LEM never perform Implicit VSI for SET_Q actions.
+ *
+ * Returns a NOP action when @prec, @q_base or @q_rgn_bits is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_hash_queue_region(uint8_t prec, enum cpfl_prot_eng pe, uint16_t q_base,
+			       uint8_t q_rgn_bits, bool no_implicit_vsi)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || q_base >= CPFL_ACT_24B_SET_Q_CNT ||
+	    q_rgn_bits > CPFL_ACT_24B_SET_Q_Q_RGN_BITS)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_SET_Q_VALID | (uint32_t)q_base |
+		((uint32_t)q_rgn_bits << CPFL_ACT_24B_SET_Q_Q_RGN_S) |
+		(((uint32_t)pe << CPFL_ACT_24B_SET_Q_DST_PE_S) &
+			CPFL_ACT_24B_SET_Q_DST_PE_M);
+	if (no_implicit_vsi)
+		val |= CPFL_ACT_24B_SET_Q_IMPLICIT_VSI_DIS;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_SET_Q, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_mod_profile - Encode a 24-bit MOD_PROFILE action
+ *
+ * This action specifies a Modify profile to use for modifying the network
+ * packet being classified.  In addition, it also provides a hint to whether
+ * or not an accompanied MOD_ADDR action is expected and should be prefetched.
+ *
+ * There is only one MOD_PROFILE action slot.  If multiple classification blocks
+ * emit this action, the precedence value and auxiliary precedence value will be
+ * used to select one with higher precedence.
+ *
+ * Returns a NOP action when @prec, @prof or @ptype_xltn_idx is out of range.
+ */
+static inline union cpfl_action_set
+cpfl_act_mod_profile(uint8_t prec, uint16_t prof, uint8_t ptype_xltn_idx, bool append_act_bus,
+		     bool miss_prepend, enum cpfl_act_mod_profile_hint hint)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) ||
+	    prof >= CPFL_ACT_24B_MOD_PROFILE_PROF_CNT ||
+	    ptype_xltn_idx >= CPFL_ACT_24B_MOD_PROFILE_PTYPE_XLTN_INDEXES)
+		return cpfl_act_nop();
+
+	val = CPFL_ACT_24B_MOD_PROFILE_VALID |
+		(((uint32_t)hint << CPFL_ACT_24B_MOD_PROFILE_HINT_S) &
+			CPFL_ACT_24B_MOD_PROFILE_HINT_M) |
+		(((uint32_t)ptype_xltn_idx << CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_S) &
+			CPFL_ACT_24B_MOD_PROFILE_XTLN_IDX_M) |
+		((uint32_t)prof << CPFL_ACT_24B_MOD_PROFILE_PROF_S);
+	if (append_act_bus)
+		val |= CPFL_ACT_24B_MOD_PROFILE_APPEND_ACT_BUS;
+	if (miss_prepend)
+		val |= CPFL_ACT_24B_MOD_PROFILE_SET_MISS_PREPEND;
+
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_MOD_PROFILE, val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_meter - Encode a 24-bit METER action
+ *
+ * Return NOP if any given input parameter is invalid.
+ *
+ * A bank can only be used by one of the METER action slots.  If multiple METER
+ * actions select the same bank, the action with the highest action slot wins.
+ * In Policer mode, METER actions at the higher indexes have precedence over
+ * ones at lower indexes.
+ */
+static inline union cpfl_action_set
+cpfl_act_meter(uint8_t slot, uint8_t prec, uint32_t idx, uint8_t bank)
+{
+	union cpfl_action_set a;
+	uint32_t val;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_METER_SLOTS  ||
+	    idx >= CPFL_ACT_24B_METER_INDEX_CNT ||
+	    bank >= CPFL_ACT_24B_METER_BANK_CNT)
+		return cpfl_act_nop();
+
+	/* Pack valid bit, meter index and bank; slot selects the action index */
+	val = CPFL_ACT_24B_METER_VALID |
+		(uint32_t)idx << CPFL_ACT_24B_METER_INDEX_S |
+		(uint32_t)bank << CPFL_ACT_24B_METER_BANK_S;
+	a.data = CPFL_ACT_MAKE_24B_A(prec, CPFL_ACT_24B_INDEX_METER + slot,
+				     val);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md8 - Encode a 24-bit SET_MD/8 action for an action slot
+ *
+ * This SET_MD action sets/updates a byte of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 bytes of any of the first 7 metadata types.
+ *
+ * Return NOP if any given input parameter is invalid.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md8(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t off, uint8_t val, uint8_t mask)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD8_TYPE_ID_MAX ||
+	    off > CPFL_ACT_24B_SET_MD8_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	/* Pack metadata type ID, byte offset, write mask and value */
+	tmp = ((uint32_t)mid << CPFL_ACT_24B_SET_MD8_TYPE_ID_S) |
+		((uint32_t)off << CPFL_ACT_24B_SET_MD8_OFFSET_S) |
+		((uint32_t)mask << CPFL_ACT_24B_SET_MD8_MASK_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD8_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md16 - Encode a 24-bit SET_MD/16 action for an action slot
+ *
+ * This SET_MD action sets/updates a word of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action variant can only set
+ * one of the first 16 words of any of the first 7 metadata types.
+ *
+ * Return NOP if any given input parameter is invalid.
+ */
+static inline union cpfl_action_set
+cpfl_act_set_md16(uint8_t slot, uint8_t prec, uint8_t mid, uint8_t word_off, uint16_t val)
+{
+	union cpfl_action_set a;
+	uint32_t tmp;
+
+	if (!CPFL_ACT_PREC_CHECK(prec) || slot >= CPFL_ACT_24B_SET_MD_SLOTS ||
+	    mid > CPFL_ACT_24B_SET_MD16_TYPE_ID_MAX ||
+	    word_off > CPFL_ACT_24B_SET_MD16_OFFSET_MAX)
+		return cpfl_act_nop();
+
+	/* SET_MD16 opcode bit distinguishes this from the byte variant */
+	tmp = ((uint32_t)CPFL_ACT_24B_SET_MD16) |
+		((uint32_t)mid << CPFL_ACT_24B_SET_MD16_TYPE_ID_S) |
+		((uint32_t)word_off << CPFL_ACT_24B_SET_MD16_OFFSET_S) |
+		((uint32_t)val << CPFL_ACT_24B_SET_MD16_VAL_S);
+	a.data = CPFL_ACT_MAKE_24B_B(prec, CPFL_ACT_24B_INDEX_SET_MD + slot,
+				     tmp);
+
+	return a;
+}
+
+/**
+ * cpfl_act_set_md32_ext - Encode a 24-bit SET_MD/32 action for an action slot
+ *
+ * This SET_MD action sets/updates a dword of a given metadata ID structure
+ * using one of the SET_MD action slots.  This action is made up of 2 chained
+ * action sets.  The chained action set is the first; the base/parent action
+ * set is the second.
+ *
+ * On any invalid parameter, both action sets in @ext are set to NOP.
+ */
+static inline void
+cpfl_act_set_md32_ext(struct cpfl_action_set_ext *ext, uint8_t slot, uint8_t prec, uint8_t mid,
+		      uint8_t off, uint32_t val)
+{
+	if (slot >= CPFL_ACT_24B_SET_MD_SLOTS || !CPFL_ACT_PREC_CHECK(prec) ||
+	    mid >= CPFL_METADATA_ID_CNT ||
+	    (off + sizeof(uint32_t)) > CPFL_METADATA_STRUCT_MAX_SZ) {
+		ext->acts[0] = cpfl_act_nop();
+		ext->acts[1] = cpfl_act_nop();
+	} else {
+		uint32_t tmp;
+
+		/* Chained action set comes first: carries the upper value bits */
+		tmp = val >> CPFL_ACT_24B_SET_MD_AUX_32B_VAL_H_SR;
+		ext->acts[0].data =
+			CPFL_ACT_24B_SET_MD_AUX_MAKE(CPFL_ACT_24B_SET_MD_OP_32B,
+						     mid, off, tmp);
+
+		/* Lower 24 bits of value go into the base action set */
+		tmp = val & CPFL_ACT_24B_SET_MD32_VAL_L_M;
+		ext->acts[1].data =
+			CPFL_ACT_MAKE_24B_B(prec,
+					    CPFL_ACT_24B_INDEX_SET_MD + slot,
+					    tmp);
+	}
+}
+
+#endif /* _CPFL_ACTIONS_H_ */
diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c
new file mode 100644
index 0000000000..476c78f235
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.c
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include "cpfl_controlq.h"
+#include "base/idpf_controlq.h"
+#include "rte_common.h"
+
+/**
+ * cpfl_check_dma_mem_parameters - verify DMA memory params from CP
+ * @qinfo: pointer to create control queue info struct
+ *
+ * Verify that DMA parameter of each DMA memory struct is present and
+ * consistent with control queue parameters.
+ *
+ * Return 0 on success, -EBADR on missing/mis-sized DMA memory, -EINVAL on
+ * a ring size that does not match the requested queue length.
+ */
+static inline int
+cpfl_check_dma_mem_parameters(struct cpfl_ctlq_create_info *qinfo)
+{
+	struct idpf_dma_mem *ring = &qinfo->ring_mem;
+	struct idpf_dma_mem *buf = &qinfo->buf_mem;
+
+	if (!ring->va || !ring->size)
+		return -EBADR;
+
+	/* Ring must hold exactly 'len' descriptors */
+	if (ring->size != qinfo->len * sizeof(struct idpf_ctlq_desc))
+		return -EINVAL;
+
+	/* no need for buffer checks for TX queues */
+	if (qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_CONFIG_TX ||
+	    qinfo->type == IDPF_CTLQ_TYPE_RDMA_TX)
+		return 0;
+
+	if (!buf->va || !buf->size)
+		return -EBADR;
+
+	/* accommodate different types of rx ring buffer sizes */
+	/* NOTE(review): the CONFIG_RX branch validates against the fixed
+	 * CPFL_CFGQ_RING_LEN rather than qinfo->len — confirm this is
+	 * intentional for queues created with a different length.
+	 */
+	if ((qinfo->type == IDPF_CTLQ_TYPE_MAILBOX_RX &&
+	     buf->size != CPFL_CTLQ_MAILBOX_BUFFER_SIZE * qinfo->len) ||
+	    (qinfo->type == IDPF_CTLQ_TYPE_CONFIG_RX &&
+	     buf->size != CPFL_CFGQ_RING_LEN * CPFL_CTLQ_CFGQ_BUFFER_SIZE))
+		return -EBADR;
+
+	return 0;
+}
+
+/**
+ * cpfl_ctlq_alloc_ring_res - store memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ * @qinfo: pointer to create queue info struct
+ *
+ * The CP takes care of all DMA memory allocations. Store the allocated memory
+ * information for the descriptor ring and buffers. If the memory for either the
+ * descriptor ring or the buffers is not allocated properly and/or inconsistent
+ * with the control queue parameters, this routine will free the memory for
+ * both the descriptors and the buffers.
+ *
+ * Return 0 on success, -ENOMEM on allocation failure, -EBADR/-EINVAL on
+ * invalid DMA parameters or queue type.
+ */
+int
+cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq,
+			 struct cpfl_ctlq_create_info *qinfo)
+{
+	int ret_code = 0;
+	unsigned int elem_size;
+	int i = 0;
+
+	ret_code = cpfl_check_dma_mem_parameters(qinfo);
+	if (ret_code)
+		/* TODO: Log an error message per CP */
+		goto err;
+
+	cq->desc_ring.va = qinfo->ring_mem.va;
+	cq->desc_ring.pa = qinfo->ring_mem.pa;
+	cq->desc_ring.size = qinfo->ring_mem.size;
+
+	switch (cq->cq_type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_RDMA_RX:
+		/* Only receive queues will have allocated buffers
+		 * during init.  CP allocates one big chunk of DMA
+		 * memory whose size is equal to ring_len * buff_size.
+		 * In CPFLib, the block gets broken down to multiple
+		 * smaller blocks that actually get programmed in the hardware.
+		 */
+
+		cq->bi.rx_buff = (struct idpf_dma_mem **)
+			idpf_calloc(hw, cq->ring_size,
+				    sizeof(struct idpf_dma_mem *));
+		if (!cq->bi.rx_buff) {
+			ret_code = -ENOMEM;
+			/* TODO: Log an error message per CP */
+			goto err;
+		}
+
+		elem_size = qinfo->buf_size;
+		for (i = 0; i < cq->ring_size; i++) {
+			cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc
+					    (hw, 1,
+					     sizeof(struct idpf_dma_mem));
+			if (!cq->bi.rx_buff[i]) {
+				ret_code = -ENOMEM;
+				goto free_rx_buffs;
+			}
+			/* Carve the i-th slice out of the single CP buffer */
+			cq->bi.rx_buff[i]->va =
+			    (uint64_t *)((char *)qinfo->buf_mem.va + (i * elem_size));
+			cq->bi.rx_buff[i]->pa = qinfo->buf_mem.pa +
+					       (i * elem_size);
+			cq->bi.rx_buff[i]->size = elem_size;
+		}
+		break;
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_RDMA_TX:
+	case IDPF_CTLQ_TYPE_RDMA_COMPL:
+		/* TX-side queues carry no pre-posted buffers */
+		break;
+	default:
+		ret_code = -EBADR;
+	}
+
+	return ret_code;
+
+free_rx_buffs:
+	/* Unwind the per-element headers allocated so far, then free the
+	 * pointer array itself.  The array free used to be guarded by an
+	 * inverted NULL test (if (!cq->bi.rx_buff)), which leaked the array
+	 * on every partial-allocation failure; the label is only reached
+	 * after the array allocation succeeded, so free it unconditionally.
+	 */
+	i--;
+	for (; i >= 0; i--)
+		idpf_free(hw, cq->bi.rx_buff[i]);
+
+	idpf_free(hw, cq->bi.rx_buff);
+	cq->bi.rx_buff = NULL;
+
+err:
+	return ret_code;
+}
+
+/**
+ * cpfl_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void
+cpfl_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		/* Mark descriptor as carrying an indirect (external) buffer */
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		/* Split the 64-bit buffer PA across the two address dwords */
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * cpfl_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ *
+ * Copy the register offsets/masks supplied by the CP into the local
+ * control queue structure.
+ */
+static void
+cpfl_ctlq_setup_regs(struct idpf_ctlq_info *cq, struct cpfl_ctlq_create_info *q_create_info)
+{
+	const struct idpf_ctlq_reg *src = &q_create_info->reg;
+	struct idpf_ctlq_reg *dst = &cq->reg;
+
+	dst->head = src->head;
+	dst->tail = src->tail;
+	dst->len = src->len;
+	dst->bah = src->bah;
+	dst->bal = src->bal;
+	dst->len_mask = src->len_mask;
+	dst->len_ena_mask = src->len_ena_mask;
+	dst->head_mask = src->head_mask;
+}
+
+/**
+ * cpfl_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void
+cpfl_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (uint32_t)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL need to be set */
+	/* (q_id == -1 identifies the default mailbox queue; see
+	 * cpfl_ctlq_create_info::id)
+	 */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * cpfl_ctlq_dealloc_ring_res - free up the descriptor buffer structure
+ * @hw: context info for the callback
+ * @cq: pointer to the specific control queue
+ *
+ * DMA buffers are released by the CP itself; only the local bookkeeping
+ * structures (per-element headers and pointer arrays) are freed here.
+ */
+static void
+cpfl_ctlq_dealloc_ring_res(struct idpf_hw *hw __rte_unused, struct idpf_ctlq_info *cq)
+{
+	int i;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX ||
+	    cq->cq_type == IDPF_CTLQ_TYPE_CONFIG_RX) {
+		for (i = 0; i < cq->ring_size; i++)
+			idpf_free(hw, cq->bi.rx_buff[i]);
+		/* free the buffer header */
+		idpf_free(hw, cq->bi.rx_buff);
+	} else {
+		/* TX queues only carry the message pointer array */
+		idpf_free(hw, cq->bi.tx_msg);
+	}
+}
+
+/**
+ * cpfl_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Return 0 on success; -EINVAL on bad length/buffer size or unsupported
+ * queue type, -ENOMEM on allocation failure.
+ */
+int
+cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+	      struct idpf_ctlq_info **cq_out)
+{
+	struct idpf_ctlq_info *cq;
+	bool is_rxq = false;
+	int status = 0;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	cq = (struct idpf_ctlq_info *)
+	     idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+
+	if (!cq)
+		return -ENOMEM;
+
+	cq->cq_type = qinfo->type;
+	cq->q_id = qinfo->id;
+	cq->buf_size = qinfo->buf_size;
+	cq->ring_size = qinfo->len;
+
+	cq->next_to_use = 0;
+	cq->next_to_clean = 0;
+	cq->next_to_post = cq->ring_size - 1;
+
+	/* RX types fall through to the shared ring-resource setup */
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_EVENT_RX:
+	case IDPF_CTLQ_TYPE_CONFIG_RX:
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+		/* fallthrough */
+	case IDPF_CTLQ_TYPE_CONFIG_TX:
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = cpfl_ctlq_alloc_ring_res(hw, cq, qinfo);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		cpfl_ctlq_init_rxq_bufs(cq);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		cq->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!cq->bi.tx_msg) {
+			status = -ENOMEM;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	cpfl_ctlq_setup_regs(cq, qinfo);
+
+	cpfl_ctlq_init_regs(hw, cq, is_rxq);
+
+	/* Lock and list membership are torn down in cpfl_ctlq_remove() */
+	idpf_init_lock(&cq->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
+
+	*cq_out = cq;
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+	idpf_free(hw, cq);
+	cq = NULL;
+
+	return status;
+}
+
+/**
+ * cpfl_vport_ctlq_add - add a control queue on behalf of a vport
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq: (output) double pointer to control queue to be created
+ *
+ * Thin wrapper around cpfl_ctlq_add(); same semantics and return values.
+ */
+int
+cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo,
+		    struct idpf_ctlq_info **cq)
+{
+	return cpfl_ctlq_add(hw, qinfo, cq);
+}
+
+/**
+ * cpfl_ctlq_shutdown - shutdown the CQ
+ * The main shutdown routine for any control queue.
+ * Frees ring resources under the queue lock and destroys the lock;
+ * the queue must not be used afterwards.
+ */
+static void
+cpfl_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* ring_size == 0 marks an already-uninitialized queue */
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+	/* free ring buffers and the ring itself */
+	cpfl_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * cpfl_ctlq_remove - deallocate and remove specified control queue
+ * Unlinks the queue from the hw list, shuts it down and frees the
+ * queue structure itself.
+ */
+static void
+cpfl_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	cpfl_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+/**
+ * cpfl_vport_ctlq_remove - remove a control queue created for a vport
+ * Thin wrapper around cpfl_ctlq_remove().
+ */
+void
+cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	cpfl_ctlq_remove(hw, cq);
+}
diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h
new file mode 100644
index 0000000000..930d717f63
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_controlq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CONTROLQ_H_
+#define _CPFL_CONTROLQ_H_
+
+#include "base/idpf_osdep.h"
+#include "base/idpf_controlq_api.h"
+
+/* Control queue sizing constants (bytes / descriptors) */
+#define CPFL_CTLQ_DESCRIPTOR_SIZE	32
+#define CPFL_CTLQ_MAILBOX_BUFFER_SIZE	4096
+#define CPFL_CTLQ_CFGQ_BUFFER_SIZE	256
+#define CPFL_DFLT_MBX_RING_LEN		512
+#define CPFL_CFGQ_RING_LEN		512
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct cpfl_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	uint16_t len; /* Queue length passed as input */
+	uint16_t buf_size; /* buffer size passed as input */
+	uint64_t base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+	/* Pass down previously allocated descriptor ring and buffer memory
+	 * for each control queue to be created
+	 */
+	struct idpf_dma_mem ring_mem;
+	/* The CP will allocate one large buffer that the CPFlib will piece
+	 * into individual buffers for each descriptor
+	 */
+	struct idpf_dma_mem buf_mem;
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq,
+			     struct cpfl_ctlq_create_info *qinfo);
+int cpfl_ctlq_add(struct idpf_hw *hw,
+		  struct cpfl_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+int cpfl_vport_ctlq_add(struct idpf_hw *hw,
+			struct cpfl_ctlq_create_info *qinfo,
+			struct idpf_ctlq_info **cq);
+void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+#endif /* _CPFL_CONTROLQ_H_ */
diff --git a/drivers/net/cpfl/cpfl_rules.c b/drivers/net/cpfl/cpfl_rules.c
new file mode 100644
index 0000000000..eefae1767c
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.c
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#include <base/idpf_controlq.h>
+#include <stdint.h>
+#include "cpfl_rules.h"
+
+/**
+ * cpfl_prep_rule_desc_common_ctx - get bit common context for descriptor
+ * @cmn_cfg: rule config fields shared by all rule opcodes
+ *
+ * Returns the 64-bit descriptor context with the fields common to all
+ * SEM/MOD rule opcodes packed in; returns 0 for unknown opcodes.
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
+{
+	uint64_t context = 0;
+
+	switch (cmn_cfg->opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		/* fallthrough */
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+	case cpfl_ctlq_sem_add_rule:
+	case cpfl_ctlq_sem_del_rule:
+	case cpfl_ctlq_sem_query_rule:
+	case cpfl_ctlq_sem_update_rule:
+		/* All known opcodes share the same common field layout */
+		context |= SHIFT_VAL64(cmn_cfg->time_sel,
+				       MEV_RULE_TIME_SEL);
+		context |= SHIFT_VAL64(cmn_cfg->time_sel_val,
+				       MEV_RULE_TIME_SEL_VAL);
+		context |= SHIFT_VAL64(cmn_cfg->host_id,
+				       MEV_RULE_HOST_ID);
+		context |= SHIFT_VAL64(cmn_cfg->port_num,
+				       MEV_RULE_PORT_NUM);
+		context |= SHIFT_VAL64(cmn_cfg->resp_req,
+				       MEV_RULE_RESP_REQ);
+		context |= SHIFT_VAL64(cmn_cfg->cache_wr_thru,
+				       MEV_RULE_CACHE_WR_THRU);
+		break;
+	default:
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc_ctx - get bit context for descriptor
+ * @cfg_data: full rule config (common fields plus opcode-specific extension)
+ *
+ * Builds the 64-bit descriptor context: common fields first, then the
+ * opcode-specific extension fields (MOD content or query/delete address).
+ */
+static inline uint64_t
+cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
+{
+	uint64_t context = 0;
+
+	context |= cpfl_prep_rule_desc_common_ctx(&cfg_data->common);
+
+	switch (cfg_data->common.opc) {
+	case cpfl_ctlq_mod_query_rule:
+	case cpfl_ctlq_mod_add_update_rule:
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.obj_size,
+				       MEV_RULE_MOD_OBJ_SIZE);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.pin_content,
+				       MEV_RULE_PIN_MOD_CONTENT);
+		context |= SHIFT_VAL64(cfg_data->ext.mod_content.index,
+				       MEV_RULE_MOD_INDEX);
+		break;
+	case cpfl_ctlq_sem_query_rule_hash_addr:
+	case cpfl_ctlq_sem_query_del_rule_hash_addr:
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
+				       MEV_RULE_OBJ_ID);
+		context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
+				       MEV_RULE_OBJ_ADDR);
+		break;
+	default:
+		/* Remaining opcodes need no extension fields */
+		break;
+	}
+
+	return context;
+}
+
+/**
+ * cpfl_prep_rule_desc - build descriptor data from rule config data
+ * @cfg_data: filled-in rule config (see cpfl_fill_rule_cfg_data_common())
+ * @ctlq_msg: (output) control queue message to populate
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg)
+{
+	uint64_t context;
+	uint64_t *ctlq_ctx = (uint64_t *)&ctlq_msg->ctx.indirect.context[0];
+
+	/* Descriptor context is little-endian on the wire */
+	context = cpfl_prep_rule_desc_ctx(cfg_data);
+	*ctlq_ctx = CPU_TO_LE64(context);
+	memcpy(&ctlq_msg->cookie, &cfg_data->common.cookie, sizeof(uint64_t));
+	ctlq_msg->opcode = (uint16_t)cfg_data->common.opc;
+	ctlq_msg->data_len = cfg_data->common.buf_len;
+	ctlq_msg->status = 0;
+	ctlq_msg->ctx.indirect.payload = cfg_data->common.payload;
+}
+
+/**
+ * cpfl_prep_sem_rule_blob - build SEM rule blob data from rule entry info
+ * @key: match key bytes (may be mixed endian, copied verbatim)
+ * @key_byte_len: number of key bytes to copy
+ * @act_bytes: encoded action sets to copy (little-endian on the wire)
+ * @act_byte_len: number of action bytes
+ * @cfg_ctrl: CFG_CTRL word, see CPFL_GET_MEV_SEM_RULE_CFG_CTRL()
+ * @rule_blob: (output) rule packet record to fill
+ *
+ * note: call this function before sending rule to HW via fast path
+ */
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob)
+{
+	uint32_t *act_dst = (uint32_t *)&rule_blob->sem_rule.actions;
+	const uint32_t *act_src = (const uint32_t *)act_bytes;
+	uint32_t i;
+
+	idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
+	idpf_memcpy(rule_blob->sem_rule.key, key, key_byte_len,
+		    CPFL_NONDMA_TO_DMA);
+
+	/* NOTE(review): assumes act_bytes is 4-byte aligned and act_byte_len
+	 * is a multiple of 4 — confirm callers guarantee this (the cast to
+	 * uint32_t * is UB otherwise).
+	 */
+	for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
+		*act_dst++ = CPU_TO_LE32(*act_src++);
+
+	*((uint16_t *)&rule_blob->sem_rule.cfg_ctrl) = CPU_TO_LE16(cfg_ctrl);
+}
diff --git a/drivers/net/cpfl/cpfl_rules.h b/drivers/net/cpfl/cpfl_rules.h
new file mode 100644
index 0000000000..d23eae8e91
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_rules.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2023 Intel Corporation
+ */
+
+#ifndef _CPFL_RULES_API_H_
+#define _CPFL_RULES_API_H_
+
+#include <base/idpf_controlq_api.h>
+#include "cpfl_actions.h"
+#include "cpfl_controlq.h"
+
+/* Common Bit Mask Macros */
+#define CPFL_BIT(b)			(1 << (b))
+
+#define MAKE_MASK(type, mask, shift)	((u##type) (mask) << (shift))
+#define SHIFT_VAL_LT(type, val, field)		\
+		(((u##type)(val) << field##_S) & field##_M)
+#define SHIFT_VAL_RT(type, val, field)		\
+		(((u##type)(val) & field##_M) >> field##_S)
+
+#define MAKE_MASK_VAL(type, bit_len)	(((u##type)0x01 << (bit_len)) - 1)
+#define MAKE_MASK_VAL16(bit_len)	MAKE_MASK_VAL(16, bit_len)
+#define MAKE_MASK_VAL64(bit_len)	MAKE_MASK_VAL(64, bit_len)
+
+#define MAKE_MASK64(mask, shift)	MAKE_MASK(64, mask, shift)
+#define MAKE_MASK16(mask, shift)	MAKE_MASK(16, mask, shift)
+#define MAKE_MASK32(mask, shift)	MAKE_MASK(32, mask, shift)
+
+/* Make masks with bit length and left-shifting count */
+#define MAKE_SMASK(type, bits, shift)	\
+	((((u##type)1 << (bits)) - 1) << (shift))
+#define MAKE_SMASK64(bits, shift)	MAKE_SMASK(64, bits, shift)
+#define MAKE_SMASK32(bits, shift)	MAKE_SMASK(32, bits, shift)
+#define MAKE_SMASK16(bits, shift)	MAKE_SMASK(16, bits, shift)
+
+#define SHIFT_VAL64(val, field)		SHIFT_VAL_LT(64, val, field)
+#define SHIFT_VAL32(val, field)		SHIFT_VAL_LT(32, val, field)
+#define SHIFT_VAL16(val, field)		SHIFT_VAL_LT(16, val, field)
+
+/* Rule Config queue opcodes */
+enum cpfl_ctlq_rule_cfg_opc {
+	cpfl_ctlq_sem_add_rule				= 0x1303,
+	cpfl_ctlq_sem_update_rule			= 0x1304,
+	cpfl_ctlq_sem_del_rule				= 0x1305,
+	cpfl_ctlq_sem_query_rule			= 0x1306,
+	cpfl_ctlq_sem_query_rule_hash_addr		= 0x1307,
+	cpfl_ctlq_sem_query_del_rule_hash_addr		= 0x1308,
+
+	cpfl_ctlq_mod_add_update_rule			= 0x1360,
+	cpfl_ctlq_mod_query_rule			= 0x1361,
+};
+
+enum cpfl_cfg_pkt_error_code {
+	CPFL_CFG_PKT_ERR_OK = 0,
+	CPFL_CFG_PKT_ERR_ESRCH = 1,     /* Bad opcode */
+	CPFL_CFG_PKT_ERR_EEXIST = 2,    /* Entry Already exists */
+	CPFL_CFG_PKT_ERR_ENOSPC = 4,    /* No space left in the table*/
+	CPFL_CFG_PKT_ERR_ERANGE = 5,    /* Parameter out of range */
+	CPFL_CFG_PKT_ERR_ESBCOMP = 6,   /* Completion Error */
+	CPFL_CFG_PKT_ERR_ENOPIN = 7,    /* Entry cannot be pinned in cache */
+	CPFL_CFG_PKT_ERR_ENOTFND = 8,   /* Entry does not exist */
+	CPFL_CFG_PKT_ERR_EMAXCOL = 9    /* Max Hash Collision */
+};
+
+/* macros for creating context for rule descriptor */
+#define MEV_RULE_VSI_ID_S		0
+#define MEV_RULE_VSI_ID_M		\
+		MAKE_MASK64(0x7FF, MEV_RULE_VSI_ID_S)
+
+#define MEV_RULE_TIME_SEL_S		13
+#define MEV_RULE_TIME_SEL_M		\
+		MAKE_MASK64(0x3, MEV_RULE_TIME_SEL_S)
+
+#define MEV_RULE_TIME_SEL_VAL_S		15
+#define MEV_RULE_TIME_SEL_VAL_M		\
+		MAKE_MASK64(0x1, MEV_RULE_TIME_SEL_VAL_S)
+
+#define MEV_RULE_PORT_NUM_S		16
+#define MEV_RULE_HOST_ID_S		18
+#define MEV_RULE_PORT_NUM_M		\
+		MAKE_MASK64(0x3, MEV_RULE_PORT_NUM_S)
+#define MEV_RULE_HOST_ID_M		\
+		MAKE_MASK64(0x7, MEV_RULE_HOST_ID_S)
+
+#define MEV_RULE_CACHE_WR_THRU_S	21
+#define MEV_RULE_CACHE_WR_THRU_M	\
+		MAKE_MASK64(0x1, MEV_RULE_CACHE_WR_THRU_S)
+
+#define MEV_RULE_RESP_REQ_S		22
+#define MEV_RULE_RESP_REQ_M		\
+		MAKE_MASK64(0x3, MEV_RULE_RESP_REQ_S)
+#define MEV_RULE_OBJ_ADDR_S		24
+#define MEV_RULE_OBJ_ADDR_M		\
+		MAKE_MASK64(0x7FFFFFF, MEV_RULE_OBJ_ADDR_S)
+#define MEV_RULE_OBJ_ID_S		59
+#define MEV_RULE_OBJ_ID_M		\
+		MAKE_MASK64((uint64_t)0x3, MEV_RULE_OBJ_ID_S)
+
+/* macros for creating CFG_CTRL for sem/lem rule blob */
+#define MEV_RULE_CFG_CTRL_PROF_ID_S			0
+#define MEV_RULE_CFG_CTRL_PROF_ID_M			\
+		MAKE_MASK16(0x7FF, MEV_RULE_CFG_CTRL_PROF_ID_S)
+
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_S		11
+#define MEV_RULE_CFG_CTRL_SUB_PROF_ID_M		\
+		MAKE_MASK16(0x3, MEV_RULE_CFG_CTRL_SUB_PROF_ID_S)
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_S		13
+#define MEV_RULE_CFG_CTRL_PIN_CACHE_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_PIN_CACHE_S)
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S	14
+#define MEV_RULE_CFG_CTRL_CLEAR_MIRROR_M	\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_CLEAR_MIRROR_S)
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_S		15
+#define MEV_RULE_CFG_CTRL_FIXED_FETCH_M		\
+		MAKE_MASK16(0x1, MEV_RULE_CFG_CTRL_FIXED_FETCH_S)
+
+/**
+ * macro to build the CFG_CTRL for rule packet data, which is one of
+ * cpfl_prep_sem_rule_blob()'s input parameter.
+ */
+ /* build SEM CFG_CTRL*/
+#define CPFL_GET_MEV_SEM_RULE_CFG_CTRL(prof_id, sub_prof_id,		       \
+				       pin_to_cache, fixed_fetch)	       \
+		(SHIFT_VAL16((prof_id), MEV_RULE_CFG_CTRL_PROF_ID)	     | \
+		 SHIFT_VAL16((sub_prof_id), MEV_RULE_CFG_CTRL_SUB_PROF_ID)   | \
+		 SHIFT_VAL16((pin_to_cache), MEV_RULE_CFG_CTRL_PIN_CACHE)    | \
+		 SHIFT_VAL16((fixed_fetch), MEV_RULE_CFG_CTRL_FIXED_FETCH))
+
+/* build LEM CFG_CTRL*/
+#define CPFL_GET_MEV_LEM_RULE_CFG_CTRL(prof_id, pin_to_cache, clear_mirror)    \
+		(SHIFT_VAL16(prof_id, MEV_RULE_CFG_CTRL_PROF_ID)             | \
+		 SHIFT_VAL16(pin_to_cache, MEV_RULE_CFG_CTRL_PIN_CACHE)      | \
+		 SHIFT_VAL16(clear_mirror, MEV_RULE_CFG_CTRL_CLEAR_MIRROR))
+
+/* macros for creating mod content config packets */
+#define MEV_RULE_MOD_INDEX_S		24
+#define MEV_RULE_MOD_INDEX_M		\
+		MAKE_MASK64(0xFFFFFFFF, MEV_RULE_MOD_INDEX_S)
+
+#define MEV_RULE_PIN_MOD_CONTENT_S	62
+#define MEV_RULE_PIN_MOD_CONTENT_M	\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_PIN_MOD_CONTENT_S)
+#define MEV_RULE_MOD_OBJ_SIZE_S		63
+#define MEV_RULE_MOD_OBJ_SIZE_M		\
+		MAKE_MASK64((uint64_t)0x1, MEV_RULE_MOD_OBJ_SIZE_S)
+
+/**
+ * struct cpfl_sem_rule_cfg_pkt - Describes rule information for SEM
+ * note: The key may be in mixed big/little endian format, the rest of members
+ * are in little endian
+ */
+struct cpfl_sem_rule_cfg_pkt {
+#define MEV_SEM_RULE_KEY_SIZE 128
+	uint8_t key[MEV_SEM_RULE_KEY_SIZE];
+
+#define MEV_SEM_RULE_ACT_SIZE 72
+	uint8_t actions[MEV_SEM_RULE_ACT_SIZE];
+
+	/* Bit(s):
+	 * 10:0 : PROFILE_ID
+	 * 12:11: SUB_PROF_ID (used for SEM only)
+	 * 13   : pin the SEM key content into the cache
+	 * 14   : Reserved
+	 * 15   : Fixed_fetch
+	 */
+	uint8_t cfg_ctrl[2];
+
+	/* Bit(s):
+	 * 0:     valid
+	 * 15:1:  Hints
+	 * 26:16: PROFILE_ID, the profile associated with the entry
+	 * 31:27: PF
+	 * 55:32: FLOW ID (assigned by HW)
+	 * 63:56: EPOCH
+	 */
+	uint8_t ctrl_word[8];
+	uint8_t padding[46];
+};
+
+/**
+ * union cpfl_rule_cfg_pkt_record - Describes rule data blob
+ */
+union cpfl_rule_cfg_pkt_record {
+	struct cpfl_sem_rule_cfg_pkt sem_rule;
+	uint8_t pkt_data[256];
+	uint8_t mod_blob[256];
+};
+
+/**
+ * cpfl_rule_query_addr - LEM/SEM Rule Query Address structure
+ */
+struct cpfl_rule_query_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_query_del_addr - Rule Query and Delete Address
+ */
+struct cpfl_rule_query_del_addr {
+	uint8_t	obj_id;
+	uint32_t	obj_addr;
+};
+
+/**
+ * cpfl_rule_mod_content - MOD Rule Content
+ */
+struct cpfl_rule_mod_content {
+	uint8_t	obj_size;
+	uint8_t	pin_content;
+	uint32_t	index;
+};
+
+/**
+ * cpfl_rule_cfg_data_common - data struct for all rule opcodes
+ * note: some rules may only require part of the structure
+ */
+struct cpfl_rule_cfg_data_common {
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	uint64_t	cookie;
+	uint16_t	vsi_id;
+	uint8_t	port_num;
+	uint8_t	host_id;
+	uint8_t	time_sel;
+	uint8_t	time_sel_val;
+	uint8_t	cache_wr_thru;
+	uint8_t	resp_req;
+	uint32_t	ret_val;
+	uint16_t	buf_len;
+	struct idpf_dma_mem *payload;
+};
+
+/**
+ * cpfl_rule_cfg_data - rule config data
+ * note: Before sending rule to HW, caller needs to fill
+ *       in this struct then call cpfl_prep_rule_desc().
+ */
+struct cpfl_rule_cfg_data {
+	struct cpfl_rule_cfg_data_common common;
+	union {
+		struct cpfl_rule_query_addr query_addr;
+		struct cpfl_rule_query_del_addr query_del_addr;
+		struct cpfl_rule_mod_content mod_content;
+	} ext;
+};
+
+/**
+ * cpfl_fill_rule_mod_content - fill info for mod content
+ * Populates @mod_content with the given object size, pin flag and index.
+ */
+static inline void
+cpfl_fill_rule_mod_content(uint8_t mod_obj_size,
+			   uint8_t pin_mod_content,
+			   uint32_t mod_index,
+			   struct cpfl_rule_mod_content *mod_content)
+{
+	*mod_content = (struct cpfl_rule_mod_content) {
+		.obj_size = mod_obj_size,
+		.pin_content = pin_mod_content,
+		.index = mod_index,
+	};
+}
+
+/**
+ * cpfl_fill_rule_cfg_data_common - fill in rule config data for all opcodes
+ * Copies the caller-supplied identification, timing and payload fields into
+ * @cfg_cmn and clears the returned-value field.
+ * note: call this function before calls cpfl_prep_rule_desc()
+ */
+static inline void
+cpfl_fill_rule_cfg_data_common(enum cpfl_ctlq_rule_cfg_opc opc,
+			       uint64_t cookie,
+			       uint16_t vsi_id,
+			       uint8_t port_num,
+			       uint8_t host_id,
+			       uint8_t time_sel,
+			       uint8_t time_sel_val,
+			       uint8_t cache_wr_thru,
+			       uint8_t resp_req,
+			       uint16_t payload_len,
+			       struct idpf_dma_mem *payload,
+			       struct cpfl_rule_cfg_data_common *cfg_cmn)
+{
+	/* Identification */
+	cfg_cmn->opc = opc;
+	cfg_cmn->cookie = cookie;
+	cfg_cmn->vsi_id = vsi_id;
+	cfg_cmn->port_num = port_num;
+	cfg_cmn->host_id = host_id;
+
+	/* Timing and response control */
+	cfg_cmn->time_sel = time_sel;
+	cfg_cmn->time_sel_val = time_sel_val;
+	cfg_cmn->cache_wr_thru = cache_wr_thru;
+	cfg_cmn->resp_req = resp_req;
+	cfg_cmn->ret_val = 0;
+
+	/* Payload buffer */
+	cfg_cmn->buf_len = payload_len;
+	cfg_cmn->payload = payload;
+}
+
+void
+cpfl_prep_rule_desc(struct cpfl_rule_cfg_data *cfg_data,
+		    struct idpf_ctlq_msg *ctlq_msg);
+
+void
+cpfl_prep_sem_rule_blob(const uint8_t *key,
+			uint8_t key_byte_len,
+			const uint8_t *act_bytes,
+			uint8_t act_byte_len,
+			uint16_t cfg_ctrl,
+			union cpfl_rule_cfg_pkt_record *rule_blob);
+
+#endif /* _CPFL_RULES_API_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 7b8d043011..84ba994469 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,6 +43,8 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow_parser.c',
+        'cpfl_rules.c',
+        'cpfl_controlq.c',
     )
     dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
     ext_deps += js_dep
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 4/4] net/cpfl: setup ctrl path
  2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
                     ` (3 preceding siblings ...)
  2023-08-11 10:00   ` [PATCH v2 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
@ 2023-08-11 10:00   ` Wenjing Qiao
  4 siblings, 0 replies; 15+ messages in thread
From: Wenjing Qiao @ 2023-08-11 10:00 UTC (permalink / raw)
  To: yuying.zhang, beilei.xing; +Cc: dev, mingxia.liu, Wenjing Qiao, Qi Zhang

Setup the control vport and control queue for flow offloading.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
Depends-on: series-29139 ("net/cpfl: support port representor")
---
 drivers/net/cpfl/cpfl_ethdev.c | 270 ++++++++++++++++++++++++++++++++-
 drivers/net/cpfl/cpfl_ethdev.h |  14 ++
 drivers/net/cpfl/cpfl_vchnl.c  | 144 ++++++++++++++++++
 3 files changed, 425 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2f308fb86..34b7c22ee1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1783,9 +1783,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
 		return;
 	}
 
-	/* ignore if it is exceptional vport */
-	if (adapter->exceptional_vport &&
-	    adapter->exceptional_vport->base.vport_id == vc_event->vport_id)
+	/* ignore if it is ctrl vport or exceptional vport */
+	if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id ||
+	    (adapter->exceptional_vport &&
+	     adapter->exceptional_vport->base.vport_id == vc_event->vport_id))
 		return;
 
 	vport = cpfl_find_vport(adapter, vc_event->vport_id);
@@ -1983,6 +1984,260 @@ cpfl_dev_alarm_handler(void *param)
 	rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 }
 
+/* Disable all Tx, then all Rx, config queues on the control vport.
+ * The third argument of idpf_vc_queue_switch selects Rx (true) vs Tx (false);
+ * the last argument false requests "disable".
+ * Returns 0 on success, or the first error from idpf_vc_queue_switch.
+ */
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to disable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Configure and enable the control vport's config queues.
+ * First sends the Tx/Rx queue configuration messages (cpfl_config_ctlq_tx/rx),
+ * then enables each Tx and Rx config queue via idpf_vc_queue_switch (last
+ * argument true requests "enable").
+ * Returns 0 on success, or the first error encountered; does not roll back
+ * queues already enabled - the caller (cpfl_ctrl_path_open) unwinds.
+ */
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	int i, ret;
+
+	ret = cpfl_config_ctlq_tx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Tx config queue.");
+		return ret;
+	}
+
+	ret = cpfl_config_ctlq_rx(adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to configure Rx config queue.");
+		return ret;
+	}
+
+	for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Tx config queue.");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+		ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Fail to enable Rx config queue.");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* Remove every config control queue from the HW and free the DMA ring and
+ * buffer memory allocated by cpfl_cfgq_setup(). Safe on a partially set up
+ * array: DMA areas that were never allocated have a NULL va and are skipped.
+ */
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	int i;
+
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+}
+
+/* Register each entry of adapter->cfgq_info with the HW and record the
+ * resulting control queue handles in adapter->ctlqp[]. On any failure the
+ * queues added so far are torn down via cpfl_remove_cfgqs() before returning.
+ */
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+	struct idpf_ctlq_info *cfg_cq;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+					  &adapter->cfgq_info[i],
+					  &cfg_cq);
+		if (ret || !cfg_cq) {
+			PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+				    adapter->cfgq_info[i].id);
+			cpfl_remove_cfgqs(adapter);
+			return ret;
+		}
+		PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+			    adapter->cfgq_info[i].id);
+		adapter->ctlqp[i] = cfg_cq;
+	}
+
+	return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN		512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE	32
+#define CPFL_CFGQ_BUFFER_SIZE		256
+#define CPFL_CFGQ_RING_SIZE		512
+
+/* Describe the CPFL_CFGQ_NUM config queues and allocate their DMA memory.
+ * Queues are interleaved: even indices are Tx config queues, odd indices Rx
+ * (queue id and tail register derived from the ctrl vport's chunk info).
+ * All queues get a descriptor ring; only Rx queues also get buffer memory.
+ * On allocation failure everything allocated so far is freed and -ENOMEM
+ * is returned.
+ */
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_ctlq_create_info *create_cfgq_info;
+	struct cpfl_vport *vport;
+	int i, err;
+	uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+	uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+	vport = &adapter->ctrl_vport;
+	create_cfgq_info = adapter->cfgq_info;
+
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (i % 2 == 0) {
+			/* Setup Tx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+				i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+		} else {
+			/* Setup Rx config queue */
+			create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+			create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+			create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+			create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+			memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+			create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+				i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+			if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+						buf_size)) {
+				err = -ENOMEM;
+				goto free_mem;
+			}
+		}
+		if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+					ring_size)) {
+			err = -ENOMEM;
+			goto free_mem;
+		}
+	}
+	return 0;
+free_mem:
+	/* unallocated entries have NULL va, so a full sweep is safe */
+	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+		if (create_cfgq_info[i].ring_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+		if (create_cfgq_info[i].buf_mem.va)
+			idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+	}
+	return err;
+}
+
+/* Initialize the control vport from the CREATE_VPORT response previously
+ * stored in adapter->ctrl_vport_recv_info by cpfl_vc_create_ctrl_vport().
+ * Copies the vport id and the Tx/Rx queue chunk info (start qid, tail
+ * register base and spacing) into vport->base.
+ * Returns 0 on success, -EINVAL on an unexpected chunk type.
+ */
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_create_vport *vport_info =
+		(struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+	int i;
+
+	vport->itf.adapter = adapter;
+	vport->base.adapter = &adapter->base;
+	vport->base.vport_id = vport_info->vport_id;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+			vport->base.chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.tx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.tx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+			vport->base.chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->base.chunks_info.rx_qtail_start =
+			vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->base.chunks_info.rx_qtail_spacing =
+			vport_info->chunks.chunks[i].qtail_reg_spacing;
+		} else {
+			PMD_INIT_LOG(ERR, "Unsupported chunk type");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Tear down the control path set up by cpfl_ctrl_path_open().
+ * Stop the config queues first (while the control queue structures still
+ * exist), then remove the queues and free their DMA memory, and finally
+ * destroy the control vport - the reverse of the open sequence, matching
+ * the error-unwind order in cpfl_ctrl_path_open().
+ */
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+	cpfl_stop_cfgqs(adapter);
+	cpfl_remove_cfgqs(adapter);
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+/* Bring up the flow-offload control path:
+ *   1. create the control vport via virtchnl (cpfl_vc_create_ctrl_vport),
+ *   2. parse its queue chunk info (cpfl_init_ctrl_vport),
+ *   3. allocate config queue descriptors and DMA memory (cpfl_cfgq_setup),
+ *   4. register the queues with the HW (cpfl_add_cfgqs),
+ *   5. configure and enable them (cpfl_start_cfgqs).
+ * On failure each completed step is unwound in reverse via the error labels.
+ * Returns 0 on success or a negative error code.
+ */
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+	int ret;
+
+	ret = cpfl_vc_create_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to create control vport");
+		return ret;
+	}
+
+	ret = cpfl_init_ctrl_vport(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to init control vport");
+		goto err_init_ctrl_vport;
+	}
+
+	ret = cpfl_cfgq_setup(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control queues");
+		goto err_cfgq_setup;
+	}
+
+	ret = cpfl_add_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to add control queues");
+		goto err_add_cfgq;
+	}
+
+	ret = cpfl_start_cfgqs(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to start control queues");
+		goto err_start_cfgqs;
+	}
+
+	return 0;
+
+err_start_cfgqs:
+	cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+	cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+	idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+	return ret;
+}
+
 static struct virtchnl2_get_capabilities req_caps = {
 	.csum_caps =
 	VIRTCHNL2_CAP_TX_CSUM_L3_IPV4          |
@@ -2150,6 +2405,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 		goto err_vports_alloc;
 	}
 
+	ret = cpfl_ctrl_path_open(adapter);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to setup control path");
+		goto err_create_ctrl_vport;
+	}
+
 	adapter->cur_vports = 0;
 	adapter->cur_vport_nb = 0;
 
@@ -2157,6 +2418,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
 
 	return ret;
 
+err_create_ctrl_vport:
+	rte_free(adapter->vports);
 err_vports_alloc:
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_repr_whitelist_uninit(adapter);
@@ -2450,6 +2713,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
 static void
 cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
 {
+	cpfl_ctrl_path_close(adapter);
 	rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
 	cpfl_vport_map_uninit(adapter);
 	idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index cf989a29b3..2e9480ffc1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
 #include "cpfl_logs.h"
 #include "cpfl_cpchnl.h"
 #include "cpfl_representor.h"
+#include "cpfl_controlq.h"
 
 /* Currently, backend supports up to 8 vports */
 #define CPFL_MAX_VPORT_NUM	8
@@ -89,6 +90,10 @@
 
 #define CPFL_FLOW_FILE_LEN 100
 
+#define CPFL_RX_CFGQ_NUM	4
+#define CPFL_TX_CFGQ_NUM	4
+#define CPFL_CFGQ_NUM		8
+
 struct cpfl_vport_param {
 	struct cpfl_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
@@ -216,10 +221,19 @@ struct cpfl_adapter_ext {
 
 	rte_spinlock_t repr_lock;
 	struct rte_hash *repr_whitelist_hash;
+
+	/* ctrl vport and ctrl queues. */
+	struct cpfl_vport ctrl_vport;
+	uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+	struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+	struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
 };
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
 int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
 			   struct cpfl_vport_id *vport_identity,
 			   struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 
 	return 0;
 }
+
+/* Send VIRTCHNL2_OP_CREATE_VPORT to create the control vport: single-queue
+ * model with CPFL_TX_CFGQ_NUM Tx and CPFL_RX_CFGQ_NUM Rx queues and no
+ * completion/buffer queues. On success the raw response is stashed in
+ * adapter->ctrl_vport_recv_info for later parsing by cpfl_init_ctrl_vport().
+ * Returns 0 on success or the idpf_vc_cmd_execute() error.
+ */
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+	struct virtchnl2_create_vport vport_msg;
+	struct idpf_cmd_info args;
+	int err = -1;
+
+	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+	vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+	vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+	vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+	vport_msg.num_tx_complq = 0;
+	vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+	vport_msg.num_rx_bufq = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+	args.in_args = (uint8_t *)&vport_msg;
+	args.in_args_size = sizeof(vport_msg);
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+		return err;
+	}
+
+	rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+		   IDPF_DFLT_MBX_BUF_SIZE);
+	return err;
+}
+
+/* Build and send VIRTCHNL2_OP_CONFIG_RX_QUEUES for the control vport's Rx
+ * config queues. Rx config queues occupy the odd indices of ctlqp[] and
+ * cfgq_info[] (see cpfl_cfgq_setup), hence the 2 * i + 1 indexing.
+ * Only the single-queue model is supported.
+ * Returns 0 on success or a negative error code.
+ */
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	/* num_qs - 1 trailing entries: assumes the struct declares a
+	 * one-element qinfo[] array - TODO confirm against virtchnl2.h
+	 */
+	num_qs = CPFL_RX_CFGQ_NUM;
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (!vc_rxqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->base.vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		rxq_info = &vc_rxqs->qinfo[i];
+		rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+		rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+		rxq_info->max_pkt_size = vport->base.max_pkt_len;
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+		rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_rxqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+/* Build and send VIRTCHNL2_OP_CONFIG_TX_QUEUES for the control vport's Tx
+ * config queues. Tx config queues occupy the even indices of ctlqp[] and
+ * cfgq_info[] (see cpfl_cfgq_setup), hence the 2 * i indexing.
+ * Only the single-queue model is supported.
+ * Returns 0 on success or a negative error code.
+ */
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+	struct cpfl_vport *vport = &adapter->ctrl_vport;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+		err = -EINVAL;
+		return err;
+	}
+
+	/* num_qs - 1 trailing entries: assumes the struct declares a
+	 * one-element qinfo[] array - TODO confirm against virtchnl2.h
+	 */
+	num_qs = CPFL_TX_CFGQ_NUM;
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (!vc_txqs) {
+		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->base.vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	for (i = 0; i < num_qs; i++) {
+		txq_info = &vc_txqs->qinfo[i];
+		txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+		txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->base.mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_vc_cmd_execute(&adapter->base, &args);
+	rte_free(vc_txqs);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
-- 
2.34.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs
  2023-08-11 10:00   ` [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
@ 2023-08-24  3:15     ` Xing, Beilei
  0 siblings, 0 replies; 15+ messages in thread
From: Xing, Beilei @ 2023-08-24  3:15 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Friday, August 11, 2023 6:00 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs
> 
> Add devargs "flow_parser" for rte_flow json parser.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
> Depends-on: series-29139 ("net/cpfl: support port representor")
> ---
>  drivers/net/cpfl/cpfl_ethdev.c | 30 +++++++++++++++++++++++++++++-
> drivers/net/cpfl/cpfl_ethdev.h |  3 +++
>  drivers/net/cpfl/meson.build   |  6 ++++++
>  3 files changed, 38 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> 8dbc175749..a2f308fb86 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -21,6 +21,7 @@
>  #define CPFL_TX_SINGLE_Q	"tx_single"
>  #define CPFL_RX_SINGLE_Q	"rx_single"
>  #define CPFL_VPORT		"vport"
> +#define CPFL_FLOW_PARSER	"flow_parser"
> 
>  rte_spinlock_t cpfl_adapter_lock;
>  /* A list for all adapters, one adapter matches one PCI device */ @@ -32,6
> +33,9 @@ static const char * const cpfl_valid_args_first[] = {
>  	CPFL_TX_SINGLE_Q,
>  	CPFL_RX_SINGLE_Q,
>  	CPFL_VPORT,
> +#ifdef CPFL_FLOW_JSON_SUPPORT
> +	CPFL_FLOW_PARSER,
> +#endif
>  	NULL
>  };
> 
> @@ -1671,6 +1675,19 @@ parse_repr(const char *key __rte_unused, const
> char *value, void *args)
>  	return 0;
>  }
> 
> +#ifdef CPFL_FLOW_JSON_SUPPORT
> +static int
> +parse_parser_file(const char *key, const char *value, void *args) {
> +	char *name = args;
> +
> +	PMD_DRV_LOG(DEBUG, "value:\"%s\" for key:\"%s\"", value, key);

Better to check if the value is valid first, e.g. return error if the length > CPFL_FLOW_FILE_LEN.

> +	strlcpy(name, value, CPFL_FLOW_FILE_LEN);
> +
> +	return 0;
> +}
> +#endif
> +
>  static int
>  cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext
> *adapter, bool first)  { @@ -1719,7 +1736,18 @@ cpfl_parse_devargs(struct
> rte_pci_device *pci_dev, struct cpfl_adapter_ext *adap
>  				 &adapter->base.is_rx_singleq);
>  	if (ret != 0)
>  		goto fail;
> -
> +#ifdef CPFL_FLOW_JSON_SUPPORT
> +	if (rte_kvargs_get(kvlist, CPFL_FLOW_PARSER)) {
> +		ret = rte_kvargs_process(kvlist, CPFL_FLOW_PARSER,
> +					 &parse_parser_file, cpfl_args-
> >flow_parser);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "Failed to parser flow_parser,
> ret: %d", ret);
> +			goto fail;
> +		}
> +	} else {
> +		cpfl_args->flow_parser[0] = '\0';
> +	}
> +#endif
>  fail:
>  	rte_kvargs_free(kvlist);
>  	return ret;
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 5bd6f930b8..cf989a29b3 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -87,6 +87,8 @@
>  #define ACC_LCE_ID	15
>  #define IMC_MBX_EFD_ID	0
> 
> +#define CPFL_FLOW_FILE_LEN 100
> +
>  struct cpfl_vport_param {
>  	struct cpfl_adapter_ext *adapter;
>  	uint16_t devarg_id; /* arg id from user */ @@ -100,6 +102,7 @@
> struct cpfl_devargs {
>  	uint16_t req_vport_nb;
>  	uint8_t repr_args_num;
>  	struct rte_eth_devargs repr_args[CPFL_REPR_ARG_NUM_MAX];
> +	char flow_parser[CPFL_FLOW_FILE_LEN];
>  };
> 
>  struct p2p_queue_chunks_info {
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index
> fb075c6860..0be25512c3 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -38,3 +38,9 @@ if arch_subdir == 'x86'
>          cflags += ['-DCC_AVX512_SUPPORT']
>      endif
>  endif
> +
> +js_dep = dependency('json-c', required: false, method : 'pkg-config')
> +if js_dep.found()
> +    dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
> +    ext_deps += js_dep
> +endif
> \ No newline at end of file
> --
> 2.34.1

Please also update the documentation to describe that the json-c library must be installed first if JSON file parsing is needed.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH v2 2/4] net/cpfl: add flow json parser
  2023-08-11 10:00   ` [PATCH v2 2/4] net/cpfl: add flow json parser Wenjing Qiao
@ 2023-08-24  7:25     ` Xing, Beilei
  0 siblings, 0 replies; 15+ messages in thread
From: Xing, Beilei @ 2023-08-24  7:25 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Friday, August 11, 2023 6:00 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH v2 2/4] net/cpfl: add flow json parser
> 
> A JSON file will be used to direct DPDK CPF PMD to
> parse rte_flow tokens into low level hardware resources
> defined in a DDP package file.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
> Depends-on: series-29139 ("net/cpfl: support port representor")
> ---
>  drivers/net/cpfl/cpfl_flow_parser.c | 1758 +++++++++++++++++++++++++++
>  drivers/net/cpfl/cpfl_flow_parser.h |  205 ++++
>  drivers/net/cpfl/meson.build        |    3 +
>  3 files changed, 1966 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.c
>  create mode 100644 drivers/net/cpfl/cpfl_flow_parser.h
> 
> diff --git a/drivers/net/cpfl/cpfl_flow_parser.c
> b/drivers/net/cpfl/cpfl_flow_parser.c
> new file mode 100644
> index 0000000000..b4635813ff
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_flow_parser.c
> @@ -0,0 +1,1758 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +
> +#include <arpa/inet.h>
> +#include <asm-generic/errno-base.h>
> +#include <stdint.h>
> +
> +#include "cpfl_flow_parser.h"
> +#include "cpfl_ethdev.h"
> +#include "rte_malloc.h"
> +
> +static enum rte_flow_item_type
> +cpfl_get_item_type_by_str(const char *type)
> +{
> +	if (strcmp(type, "eth") == 0)
> +		return RTE_FLOW_ITEM_TYPE_ETH;
> +	else if (strcmp(type, "ipv4") == 0)
> +		return RTE_FLOW_ITEM_TYPE_IPV4;
> +	else if (strcmp(type, "tcp") == 0)
> +		return RTE_FLOW_ITEM_TYPE_TCP;
> +	else if (strcmp(type, "udp") == 0)
> +		return RTE_FLOW_ITEM_TYPE_UDP;
> +	else if (strcmp(type, "vxlan") == 0)
> +		return RTE_FLOW_ITEM_TYPE_VXLAN;
> +	else if (strcmp(type, "icmp") == 0)
> +		return RTE_FLOW_ITEM_TYPE_ICMP;
> +	else if (strcmp(type, "vlan") == 0)
> +		return RTE_FLOW_ITEM_TYPE_VLAN;
> +
> +	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
> +	return RTE_FLOW_ITEM_TYPE_VOID;
> +}
> +
> +static enum rte_flow_action_type
> +cpfl_get_action_type_by_str(const char *type)
> +{
> +	if (strcmp(type, "vxlan_encap") == 0)
> +		return RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
> +
> +	PMD_DRV_LOG(ERR, "Not support this type: %s.", type);

Why does the function only support vxlan_encap? It's a bit confusing.
If it is only meant for vxlan_encap, it would be better to rename the function accordingly.

> +	return RTE_FLOW_ACTION_TYPE_VOID;
> +}
> +
> +static const char *
> +cpfl_json_object_to_string(json_object *object, const char *name)
> +{
> +	json_object *subobject;
> +
> +	if (!object) {
> +		PMD_DRV_LOG(ERR, "object doesn't exist.");
> +		return NULL;
> +	}
> +	subobject = json_object_object_get(object, name);
> +	if (!subobject) {
> +		PMD_DRV_LOG(ERR, "%s doesn't exist.", name);
> +		return 0;

Return NULL?

> +	}
> +	return json_object_get_string(subobject);
> +}
> +

<...>

> +static int
> +cpfl_flow_js_pattern_key_proto_field(json_object *cjson_field,
> +				     struct cpfl_flow_js_pr_key_proto *js_field)
> +{
> +	if (cjson_field) {

How about adding
if (!cjson_field)
        return 0;
first?

> +		int len, i;
> +
> +		len = json_object_array_length(cjson_field);
> +		js_field->fields_size = len;
> +		if (len == 0)
> +			return 0;
> +		js_field->fields =
> +		    rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_pr_key_proto_field) * len, 0);
> +		if (!js_field->fields) {
> +			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +			return -ENOMEM;
> +		}
> +		for (i = 0; i < len; i++) {
> +			json_object *object;
> +			const char *name, *mask;
> +
> +			object = json_object_array_get_idx(cjson_field, i);
> +			name = cpfl_json_object_to_string(object, "name");
> +			if (!name) {
> +				rte_free(js_field->fields);
> +				PMD_DRV_LOG(ERR, "Can not parse string
> 'name'.");
> +				return -EINVAL;
> +			}
> +			if (strlen(name) > CPFL_FLOW_JSON_STR_SIZE_MAX) {
> +				rte_free(js_field->fields);
> +				PMD_DRV_LOG(ERR, "The 'name' is too
> long.");
> +				return -EINVAL;
> +			}
> +			memcpy(js_field->fields[i].name, name, strlen(name));
> +
> +			if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +			    js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) {
> +				mask = cpfl_json_object_to_string(object,
> "mask");
> +				if (!mask) {
> +					rte_free(js_field->fields);
> +					PMD_DRV_LOG(ERR, "Can not parse
> string 'mask'.");
> +					return -EINVAL;
> +				}
> +				memcpy(js_field->fields[i].mask, mask,
> strlen(mask));
> +			} else {
> +				uint32_t mask_32b;
> +				int ret;
> +
> +				ret = cpfl_json_object_to_uint32(object,
> "mask", &mask_32b);
> +				if (ret < 0) {
> +					rte_free(js_field->fields);
> +					PMD_DRV_LOG(ERR, "Can not parse
> uint32 'mask'.");
> +					return -EINVAL;
> +				}
> +				js_field->fields[i].mask_32b = mask_32b;
> +			}
> +		}
> +	}
> +	return 0;
> +}
> +
<...>
> +
> +static int
> +cpfl_flow_js_pattern_act_fv_proto(json_object *cjson_value, struct
> cpfl_flow_js_fv *js_fv)
> +{
> +	uint16_t layer = 0, offset = 0, mask = 0;
> +	const char *header;
> +	enum rte_flow_item_type type;
> +	int ret;
> +
> +	ret = cpfl_json_object_to_uint16(cjson_value, "layer", &layer);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'value'.");
> +		return -EINVAL;
> +	}
> +
> +	header = cpfl_json_object_to_string(cjson_value, "header");
> +	if (!header) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'header'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "offset", &offset);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> +		return -EINVAL;
> +	}
> +	ret = cpfl_json_object_to_uint16(cjson_value, "mask", &mask);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Can not parse 'mask'.");
> +		return -EINVAL;
> +	}
> +	js_fv->proto.layer = layer;
> +	js_fv->proto.offset = offset;
> +	js_fv->proto.mask = mask;
> +	type = cpfl_get_item_type_by_str(header);
> +	if (type == RTE_FLOW_ITEM_TYPE_VOID)
> +		return -EINVAL;
> +
No need the blank line.
> +	else
> +		js_fv->proto.header = type;
> +	return 0;
> +}
> +
<...>
> +static int
> +cpfl_flow_js_mr_key(json_object *cjson_mr_key, struct cpfl_flow_js_mr_key
> *js_mr_key)
> +{
> +	int len, i;
> +
> +	len = json_object_array_length(cjson_mr_key);
> +	js_mr_key->actions = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_mr_key_action) * len, 0);
> +	if (!js_mr_key->actions) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +	js_mr_key->actions_size = len;
> +	for (i = 0; i < len; i++) {
> +		json_object *object, *cjson_mr_key_data;
> +		const char *type;
> +		enum rte_flow_action_type act_type;
> +
> +		object = json_object_array_get_idx(cjson_mr_key, i);
> +		/* mr->key->actions->type */
> +		type = cpfl_json_object_to_string(object, "type");
> +		if (!type) {
> +			rte_free(js_mr_key->actions);
> +			PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +			return -EINVAL;
> +		}
> +		act_type = cpfl_get_action_type_by_str(type);
> +		if (act_type == RTE_FLOW_ACTION_TYPE_VOID) {
> +			rte_free(js_mr_key->actions);
> +			return -EINVAL;
> +		}
> +		js_mr_key->actions[i].type = act_type;
> +		/* mr->key->actions->data */
> +		cjson_mr_key_data = json_object_object_get(object, "data");
> +		if (js_mr_key->actions[i].type ==
> RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			json_object *cjson_mr_key_proto;
> +			int proto_size, j;
> +			struct cpfl_flow_js_mr_key_action_vxlan_encap
> *encap;
> +
> +			cjson_mr_key_proto =
> json_object_object_get(cjson_mr_key_data, "protocols");
> +			encap = &js_mr_key->actions[i].encap;
> +			if (!cjson_mr_key_proto) {
> +				encap->proto_size = 0;
> +				continue;
> +			}
> +			proto_size =
> json_object_array_length(cjson_mr_key_proto);
> +			encap->proto_size = proto_size;
> +			for (j = 0; j < proto_size; j++) {
> +				const char *s;
> +				json_object *subobject;
> +				enum rte_flow_item_type proto_type;
> +
> +				subobject =
> json_object_array_get_idx(cjson_mr_key_proto, j);
> +				s = json_object_get_string(subobject);
> +				proto_type = cpfl_get_item_type_by_str(s);
> +				if (proto_type ==
> RTE_FLOW_ITEM_TYPE_VOID) {
> +					rte_free(js_mr_key->actions);
> +					PMD_DRV_LOG(ERR, "parse
> VXLAN_ENCAP failed.");
> +					return -EINVAL;
> +				}
> +				encap->protocols[j] = proto_type;
> +			}
> +
No need the blank line, please check all patches.
> +		} else {
> +			PMD_DRV_LOG(ERR, "not support this type: %d.",
> js_mr_key->actions[i].type);
> +			return -EINVAL;
> +		}
> +	}
> +	return 0;
> +}
> +
<...>
> +static int
> +cpfl_flow_js_mr_action(json_object *cjson_mr_act, struct
> cpfl_flow_js_mr_action *js_mr_act)
> +{
> +	json_object *cjson_mr_action_data;
> +	const char *type;
> +
> +	/* mr->action->type */
> +	type = cpfl_json_object_to_string(cjson_mr_act, "type");
> +	if (!type) {
> +		PMD_DRV_LOG(ERR, "Can not parse string 'type'.");
> +		return -EINVAL;
> +	}
> +
> +	/* mr->action->data */
> +	cjson_mr_action_data = json_object_object_get(cjson_mr_act, "data");
> +	if (strcmp(type, "mod") == 0) {
> +		json_object *layout;
> +		uint16_t profile = 0;
> +		int ret;
> +
> +		js_mr_act->type = CPFL_JS_MR_ACTION_TYPE_MOD;
> +		ret = cpfl_json_object_to_uint16(cjson_mr_action_data,
> "profile", &profile);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
> +			return -EINVAL;
> +		}
> +		js_mr_act->mod.prof = profile;
> +		layout = json_object_object_get(cjson_mr_action_data,
> "layout");
> +		ret = cpfl_flow_js_mr_layout(layout, &js_mr_act->mod);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "Can not parse layout.");
> +			return ret;
> +		}
> +	} else  {
There're two spaces after else.

> +		PMD_DRV_LOG(ERR, "not support this type: %s.", type);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_flow_js_mod_rule(json_object *json_root, struct cpfl_flow_js_parser
> *parser)
> +{
> +	json_object *cjson_mr;
> +	int i, len;
> +
> +	cjson_mr = json_object_object_get(json_root, "modifications");
> +	if (!cjson_mr) {
> +		PMD_DRV_LOG(INFO, "The modifications is optional.");
> +		return 0;
> +	}
> +
> +	len = json_object_array_length(cjson_mr);
> +	parser->mr_size = len;
> +	if (len == 0)
> +		return 0;

Move the check before 'parser->mr_size = len;'.

> +	parser->modifications = rte_malloc(NULL, sizeof(struct
> cpfl_flow_js_mr) * len, 0);
> +	if (!parser->modifications) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < len; i++) {
> +		int ret;
> +		json_object *object, *cjson_mr_key, *cjson_mr_action,
> *cjson_mr_key_action;
> +
> +		object = json_object_array_get_idx(cjson_mr, i);
> +		/* mr->key */
> +		cjson_mr_key = json_object_object_get(object, "key");
> +		/* mr->key->actions */
> +		cjson_mr_key_action = json_object_object_get(cjson_mr_key,
> "actions");
> +
> +		ret = cpfl_flow_js_mr_key(cjson_mr_key_action, &parser-
> >modifications[i].key);
> +		if (ret < 0) {
> +			rte_free(parser->modifications);
> +			PMD_DRV_LOG(ERR, "parse mr_key failed.");
> +			return -EINVAL;
> +		}
> +		/* mr->action */
> +		cjson_mr_action = json_object_object_get(object, "action");
> +		ret = cpfl_flow_js_mr_action(cjson_mr_action, &parser-
> >modifications[i].action);
> +		if (ret < 0) {
> +			rte_free(parser->modifications);
> +			PMD_DRV_LOG(ERR, "parse mr_action failed.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_parser_init(json_object *json_root, struct cpfl_flow_js_parser *parser)
> +{
> +	int ret = 0;
> +
> +	ret = cpfl_flow_js_pattern_rule(json_root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parse pattern_rule failed.");
> +		return ret;
> +	}
> +	ret = cpfl_flow_js_mod_rule(json_root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parse mod_rule failed.");
> +		return ret;
This 'return ret;' can be omitted, since it will be executed at the end anyway.
> +	}
> +
> +	return ret;
> +}
> +
> +int
> +cpfl_parser_create(struct cpfl_flow_js_parser **flow_parser, const char
> *filename)
> +{
> +	struct cpfl_flow_js_parser *parser;
> +	json_object *root;
> +	int ret;
> +
> +	parser = rte_zmalloc("flow_parser", sizeof(struct cpfl_flow_js_parser),
> 0);
> +	if (!parser) {
> +		PMD_DRV_LOG(ERR, "Not enough memory to create flow
> parser.");
> +		return -ENOMEM;
> +	}
> +	root = json_object_from_file(filename);
> +	if (!root) {
> +		PMD_DRV_LOG(ERR, "Can not load JSON file: %s.", filename);
> +		rte_free(parser);
> +		return -EINVAL;
> +	}
> +	ret = cpfl_parser_init(root, parser);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "parser init failed.");
> +		rte_free(parser);
> +		return -EINVAL;
> +	}
> +	*flow_parser = parser;
> +
> +	ret = json_object_put(root);
> +	if (ret != 1) {
> +		PMD_DRV_LOG(ERR, "Free json_object failed.");

Need to free parser here.
For all the error handling, better to use goto.

> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
> +{
> +	if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
> +		if (pr_act->sem.fv)

rte_free() already checks the pointer, so the if condition can be omitted.
Please check all rte_free(xxx) calls in the patches.

> +			rte_free(pr_act->sem.fv);
> +	}
> +}
> +
> +int
> +cpfl_parser_destroy(struct cpfl_flow_js_parser *parser)
> +{
> +	int i, j;
> +

Better to check if parser is valid.

> +	for (i = 0; i < parser->pr_size; i++) {
> +		struct cpfl_flow_js_pr *pattern = &parser->patterns[i];
> +
> +		for (j = 0; j < pattern->key.proto_size; j++) {
> +			if (pattern->key.protocols[j].fields)
> +				rte_free(pattern->key.protocols[j].fields);
> +		}
> +		if (pattern->key.protocols)
> +			rte_free(pattern->key.protocols);
> +
> +		if (pattern->key.attributes)
> +			rte_free(pattern->key.attributes);
> +
> +		for (j = 0; j < pattern->actions_size; j++) {
> +			struct cpfl_flow_js_pr_action *pr_act;
> +
> +			pr_act = &pattern->actions[j];
> +			cpfl_parser_free_pr_action(pr_act);
> +		}
> +
> +		if (pattern->actions)
> +			rte_free(pattern->actions);
> +	}
> +	if (parser->patterns)
> +		rte_free(parser->patterns);
> +
> +	for (i = 0; i < parser->mr_size; i++) {
> +		struct cpfl_flow_js_mr *mr = &parser->modifications[i];
> +
> +		if (mr->key.actions)
> +			rte_free(mr->key.actions);
> +		if (mr->action.type == CPFL_JS_MR_ACTION_TYPE_MOD &&
> mr->action.mod.layout)
> +			rte_free(mr->action.mod.layout);
> +	}
> +	if (parser->modifications)
> +		rte_free(parser->modifications);
> +
> +	rte_free(parser);
> +	return 0;
> +}
> +
<...>
> +
> +static int
> +cpfl_parse_pr_actions(struct cpfl_flow_js_pr_action *actions,
> +		      int size,
> +		      const struct rte_flow_item *items,
> +		      const struct rte_flow_attr *attr,
> +		      struct cpfl_flow_pr_action *pr_action)
> +{
> +	int i, ret;
> +
> +	for (i = 0; i < size; i++) {
> +		struct cpfl_flow_js_pr_action *pr_act;
> +		enum cpfl_flow_pr_action_type type;
> +
> +		pr_act = &actions[i];
> +		/* pr->actions->type */
> +		type = pr_act->type;
> +		/* pr->actions->data */
> +		if (attr->group % 10 == 1  && type ==
> CPFL_JS_PR_ACTION_TYPE_SEM) {
> +			struct cpfl_flow_js_pr_action_sem *sem = &pr_act-
> >sem;
> +
> +			pr_action->type = CPFL_JS_PR_ACTION_TYPE_SEM;
> +			pr_action->sem.prof = sem->prof;
> +			pr_action->sem.subprof = sem->subprof;
> +			pr_action->sem.keysize = sem->keysize;
> +			memset(pr_action->sem.cpfl_flow_pr_fv, 0,
> +			       sizeof(pr_action->sem.cpfl_flow_pr_fv));
> +			ret = cpfl_parse_fieldvectors(sem->fv, sem->fv_size,
> +						      pr_action-
> >sem.cpfl_flow_pr_fv, items);
> +			return ret;
> +		} else if (attr->group > 4 || attr->group == 0) {

What does 4 mean here? How about defining a macro to describe it?

> +			return -EPERM;
> +		}
> +	}
> +	return 0;
> +}
> +
> +static int
> +str2MAC(const char *mask, uint8_t *addr_bytes)

Please keep the cpfl PMD function naming style.

> +{
> +	int i, size, j;
> +	uint8_t n;
> +
> +	size = strlen(mask);
> +	n = 0;
> +	j = 0;
> +	for (i = 0; i < size; i++) {
> +		char ch = mask[i];
> +
> +		if (ch == ':') {
> +			if (j >= RTE_ETHER_ADDR_LEN)
> +				return -EINVAL;
> +			addr_bytes[j++] = n;
> +			n = 0;
> +		} else if (ch >= 'a' && ch <= 'f') {
> +			n = n * 16 + ch - 'a' + 10;
> +		} else if (ch >= 'A' && ch <= 'F') {
> +			n = n * 16 + ch - 'A' + 10;
> +		} else if (ch >= '0' && ch <= '9') {
> +			n = n * 16 + ch - '0';
> +		} else {
> +			return -EINVAL;
> +		}
> +	}
> +	if (j < RTE_ETHER_ADDR_LEN)
> +		addr_bytes[j++] = n;
> +
> +	if (j != RTE_ETHER_ADDR_LEN)
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_eth_mask(const char *mask, const uint8_t
> addr_bytes[RTE_ETHER_ADDR_LEN])
> +{
> +	int i, ret;
> +	uint8_t mask_bytes[RTE_ETHER_ADDR_LEN] = {0};
> +
> +	ret = str2MAC(mask, mask_bytes);
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "string to mac address failed.");
> +		return -EINVAL;
> +	}
> +	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
> +		if (mask_bytes[i] != addr_bytes[i])
> +			return -EINVAL;
> +	}
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_ipv4_mask(const char *mask, rte_be32_t addr)
> +{
> +	uint32_t out_addr;
> +
> +	/* success return 0; invalid return -EINVAL; fail return -ENOTSUP */
> +	int ret = inet_pton(AF_INET, mask, &out_addr);
> +
> +	if (ret < 0)
> +		return -EINVAL;
> +
> +	if (out_addr != addr)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_eth(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_eth *eth_mask)
> +{
> +	int field_size, j;
> +	int flag_dst_addr, flag_src_addr, flag_ether_type;
> +	struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +	if (!proto)
> +		return 0;
> +	/* eth_mask->dst.addr_bytes */
Could you elaborate on this comment? It seems unrelated to the following code.

> +
> +	field_size = proto->fields_size;
> +	if (field_size != 0 && !eth_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && eth_mask)
> +		return -EINVAL;
> +
> +	if (field_size == 0 && !eth_mask)
> +		return 0;
> +
> +	flag_dst_addr = false;
> +	flag_src_addr = false;
> +	flag_ether_type = false;
> +	for (j = 0; j < field_size; j++) {
> +		const char *name, *s_mask;
> +
> +		field = &proto->fields[j];
> +		/* match: rte_flow_item_eth.dst, more see Field Mapping
> +		 */
> +		name = field->name;
> +		/* match: rte_flow_item->mask */
> +		if (strcmp(name, "src_addr") == 0) {
> +			s_mask = field->mask;
> +			if (cpfl_check_eth_mask(s_mask, eth_mask-
> >src.addr_bytes) < 0)
> +				return -EINVAL;
> +			flag_src_addr = true;
> +		} else if (strcmp(name, "dst_addr") == 0) {
> +			s_mask = field->mask;
> +			if (cpfl_check_eth_mask(s_mask, eth_mask-
> >dst.addr_bytes) < 0)
> +				return -EINVAL;
> +			flag_dst_addr = true;
> +		} else if (strcmp(name, "ether_type") == 0) {
> +			uint16_t mask = (uint16_t)field->mask_32b;
> +
> +			if (mask != eth_mask->type)
> +				return -EINVAL;
> +			flag_ether_type = true;
> +		} else {
> +			/* TODO: more type... */
> +			PMD_DRV_LOG(ERR, "not support this name.");
> +			return -EINVAL;
> +		}
> +	}
> +	if (!flag_src_addr) {
> +		if (strcmp((const char *)eth_mask->src.addr_bytes,
> "\x00\x00\x00\x00\x00\x00") != 0)
> +			return -EINVAL;
> +	}
> +	if (!flag_dst_addr) {
> +		if (strcmp((const char *)eth_mask->dst.addr_bytes,
> "\x00\x00\x00\x00\x00\x00") != 0)
> +			return -EINVAL;
> +	}
> +	if (!flag_ether_type) {
> +		if (eth_mask->hdr.ether_type != (rte_be16_t)0)
> +			return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_check_ipv4(struct cpfl_flow_js_pr_key_proto *proto, const struct
> rte_flow_item_ipv4 *ipv4_mask)
> +{
> +	if (proto) {

How about 
if (proto == NULL)
    return 0;
first?

Please check all other functions.

> +		int field_size, j;
> +		int flag_next_proto_id, flag_src_addr, flag_dst_addr;
> +		struct cpfl_flow_js_pr_key_proto_field *field;
> +
> +		field_size = proto->fields_size;
> +		if (field_size != 0 && !ipv4_mask)
> +			return -EINVAL;
> +
> +		if (field_size == 0 && ipv4_mask)
> +			return -EINVAL;
> +
> +		if (field_size == 0 && !ipv4_mask)
> +			return 0;
> +
> +		flag_dst_addr = false;
> +		flag_src_addr = false;
> +		flag_next_proto_id = false;
> +		for (j = 0; j < field_size; j++) {
> +			const char *name;
> +
> +			field = &proto->fields[j];
> +			name = field->name;
> +			if (strcmp(name, "src_addr") == 0) {
> +				/* match: rte_flow_item->mask */
> +				const char *mask;
> +
> +				mask = field->mask;
> +				if (cpfl_check_ipv4_mask(mask, ipv4_mask-
> >hdr.src_addr) < 0)
> +					return -EINVAL;
> +				flag_src_addr = true;
> +			} else if (strcmp(name, "dst_addr") == 0) {
> +				const char *mask;
> +
> +				mask = field->mask;
> +				if (cpfl_check_ipv4_mask(mask, ipv4_mask-
> >hdr.dst_addr) < 0)
> +					return -EINVAL;
> +				flag_dst_addr = true;
> +			} else if (strcmp(name, "next_proto_id") == 0) {
> +				uint8_t mask;
> +
> +				mask = (uint8_t)field->mask_32b;
> +				if (mask != ipv4_mask->hdr.next_proto_id)
> +					return -EINVAL;
> +				flag_next_proto_id = true;
> +			} else {
> +				PMD_DRV_LOG(ERR, "not support this
> name.");
> +				return -EINVAL;
> +			}
> +		}
> +		if (!flag_src_addr) {
> +			if (ipv4_mask->hdr.src_addr != (rte_be32_t)0)
> +				return -EINVAL;
> +		}
> +		if (!flag_dst_addr) {
> +			if (ipv4_mask->hdr.dst_addr != (rte_be32_t)0)
> +				return -EINVAL;
> +		}
> +		if (!flag_next_proto_id) {
> +			if (ipv4_mask->hdr.next_proto_id != (uint8_t)0)
> +				return -EINVAL;
> +		}
> +	}
> +	return 0;
> +}
> +

<...>

> +static int
> +cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
> +			     int proto_size,
> +			     const struct rte_flow_item *items)
> +{
> +	int i, length, j = 0;
According to the coding style, split 'j = 0' from the definitions of i and length.

> +
> +	length = cpfl_get_items_length(items);
> +
> +	if (proto_size > length - 1)
> +		return -EINVAL;
> +
> +	for (i = 0; i < proto_size; i++) {
> +		struct cpfl_flow_js_pr_key_proto *key_proto;
> +		enum rte_flow_item_type type;
> +
> +		key_proto = &protocols[i];
> +		/* pr->key->proto->type */
> +		type = key_proto->type;
> +		/* pr->key->proto->fields */
> +		switch (type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			if (items[j++].type == RTE_FLOW_ITEM_TYPE_ETH) {
> +				const struct rte_flow_item_eth *eth_mask;
> +				int ret;
> +
> +				eth_mask = (const struct rte_flow_item_eth
> *)items[i].mask;
> +				ret = cpfl_check_eth(key_proto, eth_mask);
> +				if (ret < 0)
> +					return ret;
> +			} else {
> +				return -EINVAL;
> +			}
> +			break;

<...>
> +
> +
> +/* output: uint8_t *buffer, uint16_t *byte_len */
> +static int
> +cpfl_parse_layout(struct cpfl_flow_js_mr_layout *layouts, int layout_size,
> +		  struct cpfl_flow_mr_key_action *mr_key_action,
> +		  uint8_t *buffer, uint16_t *byte_len)
> +{
> +	int i, start = 0;

int start = 0;
int i;

> +
> +	for (i = 0; i < layout_size; i++) {
> +		int index, size, offset;
> +		const char *hint;
> +		const uint8_t *addr;
> +		struct cpfl_flow_mr_key_action *temp;
> +		struct cpfl_flow_js_mr_layout *layout;
> +
> +		layout = &layouts[i];
> +		/* index links to the element of the actions array. */
> +		index = layout->index;
> +		size = layout->size;
> +		offset = layout->offset;
> +		if (index == -1) {
> +			hint = "dummpy";
> +			start += size;
> +			continue;
> +		}
> +		hint = layout->hint;
> +		addr = NULL;
> +		temp = mr_key_action + index;
> +
> +		if (temp->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP) {
> +			const struct rte_flow_action_vxlan_encap
> *action_vxlan_encap;
> +			struct rte_flow_item *definition;
> +			int def_length, k;
> +
> +			action_vxlan_encap =
> +			    (const struct rte_flow_action_vxlan_encap *)temp-
> >encap.action->conf;
> +			definition = action_vxlan_encap->definition;
> +			def_length = cpfl_get_items_length(definition);
> +			for (k = 0; k < def_length - 1; k++) {
> +				if ((strcmp(hint, "eth") == 0 &&
> +				     definition[k].type ==
> RTE_FLOW_ITEM_TYPE_ETH) ||
> +				    (strcmp(hint, "ipv4") == 0 &&
> +				     definition[k].type ==
> RTE_FLOW_ITEM_TYPE_IPV4) ||
> +				    (strcmp(hint, "udp") == 0 &&
> +				     definition[k].type ==
> RTE_FLOW_ITEM_TYPE_UDP) ||
> +				    (strcmp(hint, "tcp") == 0 &&
> +				     definition[k].type ==
> RTE_FLOW_ITEM_TYPE_TCP) ||
> +				    (strcmp(hint, "vxlan") == 0 &&
> +				     definition[k].type ==
> RTE_FLOW_ITEM_TYPE_VXLAN)) {
> +					addr = (const uint8_t
> *)(definition[k].spec);
> +					if (start > 255) {

Better to use a macro for 255.
> +						*byte_len = 0;
> +						PMD_DRV_LOG(ERR, "byte
> length is too long%s",
> +							    hint);
> +						return -EINVAL;
> +					}
> +					memcpy(buffer + start, addr + offset,
> size);
> +					break;
> +				} /* TODO: more hint... */
> +			}
> +			if (k == def_length - 1) {
> +				*byte_len = 0;
> +				PMD_DRV_LOG(ERR, "can not find
> corresponding hint: %s", hint);
> +				return -EINVAL;
> +			}
> +		} else {
> +			*byte_len = 0;
> +			PMD_DRV_LOG(ERR, "Not support this type: %d.",
> temp->type);
> +			return -EINVAL;
> +		}
> +		/* else TODO: more type... */
> +
> +		start += size;
> +	}
> +	*byte_len = start;
> +	return 0;
> +}
> +
<...>
> diff --git a/drivers/net/cpfl/cpfl_flow_parser.h
> b/drivers/net/cpfl/cpfl_flow_parser.h
> new file mode 100644
> index 0000000000..af33051ce2
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_flow_parser.h
> @@ -0,0 +1,205 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +#include <json-c/json.h>
> +#include <rte_flow.h>
> +
> +#ifndef _CPFL_FLOW_PARSER_H_
> +#define _CPFL_FLOW_PARSER_H_
> +
> +#define CPFL_FLOW_JSON_STR_SIZE_MAX 100
> +
> +/* Pattern Rules Storage Begin*/
> +enum cpfl_flow_pr_action_type {
> +	CPFL_JS_PR_ACTION_TYPE_SEM,
> +	CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
> +};
> +
> +struct cpfl_flow_js_pr_key_attr {
> +	uint16_t ingress;
> +	uint16_t egress;
> +};
> +
> +struct cpfl_flow_js_pr_key_proto_field {
> +	char name[CPFL_FLOW_JSON_STR_SIZE_MAX];
> +	union {
> +		char mask[CPFL_FLOW_JSON_STR_SIZE_MAX];
> +		uint32_t mask_32b;
> +	};
> +};
> +
> +struct cpfl_flow_js_pr_key_proto {
> +	enum rte_flow_item_type type;
> +	struct cpfl_flow_js_pr_key_proto_field *fields;
> +	int fields_size;
> +};
> +
> +enum cpfl_flow_js_fv_type {
> +	CPFL_FV_TYPE_PROTOCOL,
> +	CPFL_FV_TYPE_IMMEDIATE,
> +	CPFL_FV_TYPE_UNKNOWN = -1,
> +

No need for the blank line.
Could you add some comments for each type?

> +};
> +
> +struct cpfl_flow_js_fv {
> +	uint16_t offset;
> +	enum cpfl_flow_js_fv_type type;
> +	union {
> +		uint16_t immediate;
> +		struct {
> +			uint16_t layer;
> +			enum rte_flow_item_type header;
> +			uint16_t offset;
> +			uint16_t mask;
> +		} proto;
> +	};
> +};
> +
> +#define CPFL_MAX_SEM_FV_KEY_SIZE 64

Move all macros up with CPFL_FLOW_JSON_STR_SIZE_MAX.

<...>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH 4/4] net/cpfl: setup ctrl path
  2023-08-11  9:30 ` [PATCH 4/4] net/cpfl: setup ctrl path Wenjing Qiao
@ 2023-08-24  9:15   ` Xing, Beilei
  0 siblings, 0 replies; 15+ messages in thread
From: Xing, Beilei @ 2023-08-24  9:15 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying; +Cc: dev, Liu, Mingxia, Zhang, Qi Z



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Friday, August 11, 2023 5:31 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH 4/4] net/cpfl: setup ctrl path
> 
> Setup the control vport and control queue for flow offloading.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.c | 270 ++++++++++++++++++++++++++++++++-
> drivers/net/cpfl/cpfl_ethdev.h |  14 ++  drivers/net/cpfl/cpfl_vchnl.c  | 144
> ++++++++++++++++++
>  3 files changed, 425 insertions(+), 3 deletions(-)

<...>

> +
> +static void
> +cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter) {
> +	struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
> +	struct cpfl_ctlq_create_info *create_cfgq_info;
> +	int i;
> +
> +	create_cfgq_info = adapter->cfgq_info;
> +
> +	for (i = 0; i < CPFL_CFGQ_NUM; i++) {
> +		cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
> +		if (create_cfgq_info[i].ring_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].ring_mem);
> +		if (create_cfgq_info[i].buf_mem.va)
> +			idpf_free_dma_mem(&adapter->base.hw,
> &create_cfgq_info[i].buf_mem);

 &adapter->base.hw can be replaced with hw.


^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH 3/4] net/cpfl: introduce CPF common library
  2023-08-11  9:30 ` [PATCH 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
@ 2023-08-24  9:19   ` Xing, Beilei
  2023-09-01  8:14     ` Qiao, Wenjing
  0 siblings, 1 reply; 15+ messages in thread
From: Xing, Beilei @ 2023-08-24  9:19 UTC (permalink / raw)
  To: Qiao, Wenjing, Zhang, Yuying; +Cc: dev, Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Friday, August 11, 2023 5:31 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing
> <wenjing.qiao@intel.com>
> Subject: [PATCH 3/4] net/cpfl: introduce CPF common library
> 
> Add common library support for CPFL rte_flow to
> create/delete rules.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---


> +int
> +cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct

hw is used, so remove __rte_unused.
Please check other functions.

> idpf_ctlq_info *cq,
> +			 struct cpfl_ctlq_create_info *qinfo)
> +{


^ permalink raw reply	[flat|nested] 15+ messages in thread

* RE: [PATCH 3/4] net/cpfl: introduce CPF common library
  2023-08-24  9:19   ` Xing, Beilei
@ 2023-09-01  8:14     ` Qiao, Wenjing
  0 siblings, 0 replies; 15+ messages in thread
From: Qiao, Wenjing @ 2023-09-01  8:14 UTC (permalink / raw)
  To: Xing, Beilei, Zhang, Yuying; +Cc: dev, Liu, Mingxia

Although it appears that 'hw' is being used, it is not actually being used. Look at the following macros which use 'hw':
#define idpf_calloc(h, c, s)	rte_zmalloc(NULL, (c) * (s), 0)
#define idpf_free(h, m)		rte_free(m)

If I remove '__rte_unused', it will show -Wunused-parameter warning when compiling.

-----Original Message-----
From: Xing, Beilei <beilei.xing@intel.com> 
Sent: Thursday, August 24, 2023 5:20 PM
To: Qiao, Wenjing <wenjing.qiao@intel.com>; Zhang, Yuying <yuying.zhang@intel.com>
Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>
Subject: RE: [PATCH 3/4] net/cpfl: introduce CPF common library



> -----Original Message-----
> From: Qiao, Wenjing <wenjing.qiao@intel.com>
> Sent: Friday, August 11, 2023 5:31 PM
> To: Zhang, Yuying <yuying.zhang@intel.com>; Xing, Beilei 
> <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Qiao, Wenjing 
> <wenjing.qiao@intel.com>
> Subject: [PATCH 3/4] net/cpfl: introduce CPF common library
> 
> Add common library support for CPFL rte_flow to create/delete rules.
> 
> Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
> ---


> +int
> +cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw __rte_unused, struct

hw is used, so remove __rte_unused.
Please check other functions.

> idpf_ctlq_info *cq,
> +			 struct cpfl_ctlq_create_info *qinfo) {


^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2023-09-01  8:14 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-11  9:30 [PATCH 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
2023-08-11  9:30 ` [PATCH 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
2023-08-11 10:00   ` [PATCH v2 0/4] net/cpfl: add basic support for rte_flow Wenjing Qiao
2023-08-11 10:00   ` [PATCH v2 1/4] net/cpfl: parse flow parser file in devargs Wenjing Qiao
2023-08-24  3:15     ` Xing, Beilei
2023-08-11 10:00   ` [PATCH v2 2/4] net/cpfl: add flow json parser Wenjing Qiao
2023-08-24  7:25     ` Xing, Beilei
2023-08-11 10:00   ` [PATCH v2 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
2023-08-11 10:00   ` [PATCH v2 4/4] net/cpfl: setup ctrl path Wenjing Qiao
2023-08-11  9:30 ` [PATCH 2/4] net/cpfl: add flow json parser Wenjing Qiao
2023-08-11  9:30 ` [PATCH 3/4] net/cpfl: introduce CPF common library Wenjing Qiao
2023-08-24  9:19   ` Xing, Beilei
2023-09-01  8:14     ` Qiao, Wenjing
2023-08-11  9:30 ` [PATCH 4/4] net/cpfl: setup ctrl path Wenjing Qiao
2023-08-24  9:15   ` Xing, Beilei

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).