DPDK patches and discussions
 help / color / mirror / Atom feed
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v1 27/43] net/dpaa2: frame attribute flags parser
Date: Fri, 13 Sep 2024 11:29:43 +0530	[thread overview]
Message-ID: <20240913055959.3246917-28-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240913055959.3246917-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

FAF parser extracts are used to identify the protocol type
instead of extracts of the previous protocol's type.
FAF starts from offset 2 to include user-defined flags, which
will be used for soft protocol distribution.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/net/dpaa2/dpaa2_flow.c | 475 +++++++++++++++++++--------------
 1 file changed, 273 insertions(+), 202 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index fe3c9f6d7d..d7b53a1916 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -22,13 +22,6 @@
 #include <dpaa2_ethdev.h>
 #include <dpaa2_pmd_logs.h>
 
-/* Workaround to discriminate the UDP/TCP/SCTP
- * with next protocol of l3.
- * MC/WRIOP are not able to identify
- * the l4 protocol with l4 ports.
- */
-static int mc_l4_port_identification;
-
 static char *dpaa2_flow_control_log;
 static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
 
@@ -260,6 +253,10 @@ dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
 			sprintf(string, "raw offset/len: %d/%d",
 				extract->extract.from_data.offset,
 				extract->extract.from_data.size);
+		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
+			sprintf(string, "parse offset/len: %d/%d",
+				extract->extract.from_parse.offset,
+				extract->extract.from_parse.size);
 		}
 		DPAA2_FLOW_DUMP("%s", string);
 		if ((idx + 1) < dpkg->num_extracts)
@@ -298,6 +295,10 @@ dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
 			sprintf(string, "raw offset/len: %d/%d",
 				extract->extract.from_data.offset,
 				extract->extract.from_data.size);
+		} else if (type == DPKG_EXTRACT_FROM_PARSE) {
+			sprintf(string, "parse offset/len: %d/%d",
+				extract->extract.from_parse.offset,
+				extract->extract.from_parse.size);
 		}
 		DPAA2_FLOW_DUMP("%s", string);
 		if ((idx + 1) < dpkg->num_extracts)
@@ -631,6 +632,66 @@ dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
 	return 0;
 }
 
+static int
+dpaa2_flow_faf_advance(struct dpaa2_dev_priv *priv,
+	int faf_byte, enum dpaa2_flow_dist_type dist_type, int tc_id,
+	int *insert_offset)
+{
+	int offset, ret;
+	struct dpaa2_key_profile *key_profile;
+	int num, pos;
+
+	if (dist_type == DPAA2_FLOW_QOS_TYPE)
+		key_profile = &priv->extract.qos_key_extract.key_profile;
+	else
+		key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
+
+	num = key_profile->num;
+
+	if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
+		DPAA2_PMD_ERR("Number of extracts overflows");
+		return -EINVAL;
+	}
+
+	if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
+		offset = key_profile->ip_addr_extract_off;
+		pos = key_profile->ip_addr_extract_pos;
+		key_profile->ip_addr_extract_pos++;
+		key_profile->ip_addr_extract_off++;
+		if (dist_type == DPAA2_FLOW_QOS_TYPE) {
+			ret = dpaa2_flow_qos_rule_insert_hole(priv,
+					offset, 1);
+		} else {
+			ret = dpaa2_flow_fs_rule_insert_hole(priv,
+				offset, 1, tc_id);
+		}
+		if (ret)
+			return ret;
+	} else {
+		pos = num;
+	}
+
+	if (pos > 0) {
+		key_profile->key_offset[pos] =
+			key_profile->key_offset[pos - 1] +
+			key_profile->key_size[pos - 1];
+	} else {
+		key_profile->key_offset[pos] = 0;
+	}
+
+	key_profile->key_size[pos] = 1;
+	key_profile->prot_field[pos].type = DPAA2_FAF_KEY;
+	key_profile->prot_field[pos].key_field = faf_byte;
+	key_profile->num++;
+
+	if (insert_offset)
+		*insert_offset = key_profile->key_offset[pos];
+
+	key_profile->key_max_size++;
+
+	return pos;
+}
+
 /* Move IPv4/IPv6 addresses to fill new extract previous IP address.
  * Current MC/WRIOP only support generic IP extract but IP address
  * is not fixed, so we have to put them at end of extracts, otherwise,
@@ -692,6 +753,7 @@ dpaa2_flow_key_profile_advance(enum net_prot prot,
 	}
 
 	key_profile->key_size[pos] = field_size;
+	key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
 	key_profile->prot_field[pos].prot = prot;
 	key_profile->prot_field[pos].key_field = field;
 	key_profile->num++;
@@ -715,6 +777,55 @@ dpaa2_flow_key_profile_advance(enum net_prot prot,
 	return pos;
 }
 
+static int
+dpaa2_flow_faf_add_hdr(int faf_byte,
+	struct dpaa2_dev_priv *priv,
+	enum dpaa2_flow_dist_type dist_type, int tc_id,
+	int *insert_offset)
+{
+	int pos, i, offset;
+	struct dpaa2_key_extract *key_extract;
+	struct dpkg_profile_cfg *dpkg;
+	struct dpkg_extract *extracts;
+
+	if (dist_type == DPAA2_FLOW_QOS_TYPE)
+		key_extract = &priv->extract.qos_key_extract;
+	else
+		key_extract = &priv->extract.tc_key_extract[tc_id];
+
+	dpkg = &key_extract->dpkg;
+	extracts = dpkg->extracts;
+
+	if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+		DPAA2_PMD_ERR("Number of extracts overflows");
+		return -EINVAL;
+	}
+
+	pos = dpaa2_flow_faf_advance(priv,
+			faf_byte, dist_type, tc_id,
+			insert_offset);
+	if (pos < 0)
+		return pos;
+
+	if (pos != dpkg->num_extracts) {
+		/* Not the last pos, must have IP address extract.*/
+		for (i = dpkg->num_extracts - 1; i >= pos; i--) {
+			memcpy(&extracts[i + 1],
+				&extracts[i], sizeof(struct dpkg_extract));
+		}
+	}
+
+	offset = DPAA2_FAFE_PSR_OFFSET + faf_byte;
+
+	extracts[pos].type = DPKG_EXTRACT_FROM_PARSE;
+	extracts[pos].extract.from_parse.offset = offset;
+	extracts[pos].extract.from_parse.size = 1;
+
+	dpkg->num_extracts++;
+
+	return 0;
+}
+
 static int
 dpaa2_flow_extract_add_hdr(enum net_prot prot,
 	uint32_t field, uint8_t field_size,
@@ -1001,6 +1112,7 @@ dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
 			key_profile->key_offset[pos] = 0;
 		}
 		key_profile->key_size[pos] = item_size;
+		key_profile->prot_field[pos].type = DPAA2_NET_PROT_KEY;
 		key_profile->prot_field[pos].prot = NET_PROT_PAYLOAD;
 		key_profile->prot_field[pos].key_field = field;
 
@@ -1040,7 +1152,7 @@ dpaa2_flow_extract_add_raw(struct dpaa2_dev_priv *priv,
 
 static inline int
 dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
-	enum net_prot prot, uint32_t key_field)
+	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
 {
 	int pos;
 	struct key_prot_field *prot_field;
@@ -1053,16 +1165,23 @@ dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
 
 	prot_field = key_profile->prot_field;
 	for (pos = 0; pos < key_profile->num; pos++) {
-		if (prot_field[pos].prot == prot &&
-			prot_field[pos].key_field == key_field) {
+		if (type == DPAA2_NET_PROT_KEY &&
+			prot_field[pos].prot == prot &&
+			prot_field[pos].key_field == key_field &&
+			prot_field[pos].type == type)
+			return pos;
+		else if (type == DPAA2_FAF_KEY &&
+			prot_field[pos].key_field == key_field &&
+			prot_field[pos].type == type)
 			return pos;
-		}
 	}
 
-	if (dpaa2_flow_l4_src_port_extract(prot, key_field)) {
+	if (type == DPAA2_NET_PROT_KEY &&
+		dpaa2_flow_l4_src_port_extract(prot, key_field)) {
 		if (key_profile->l4_src_port_present)
 			return key_profile->l4_src_port_pos;
-	} else if (dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
+	} else if (type == DPAA2_NET_PROT_KEY &&
+		dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
 		if (key_profile->l4_dst_port_present)
 			return key_profile->l4_dst_port_pos;
 	}
@@ -1072,80 +1191,53 @@ dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
 
 static inline int
 dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
-	enum net_prot prot, uint32_t key_field)
+	enum key_prot_type type, enum net_prot prot, uint32_t key_field)
 {
 	int i;
 
-	i = dpaa2_flow_extract_search(key_profile, prot, key_field);
+	i = dpaa2_flow_extract_search(key_profile, type, prot, key_field);
 	if (i >= 0)
 		return key_profile->key_offset[i];
 	else
 		return i;
 }
 
-struct prev_proto_field_id {
-	enum net_prot prot;
-	union {
-		rte_be16_t eth_type;
-		uint8_t ip_proto;
-	};
-};
-
 static int
-dpaa2_flow_prev_proto_rule(struct dpaa2_dev_priv *priv,
+dpaa2_flow_faf_add_rule(struct dpaa2_dev_priv *priv,
 	struct dpaa2_dev_flow *flow,
-	const struct prev_proto_field_id *prev_proto,
+	enum dpaa2_rx_faf_offset faf_bit_off,
 	int group,
 	enum dpaa2_flow_dist_type dist_type)
 {
 	int offset;
 	uint8_t *key_addr;
 	uint8_t *mask_addr;
-	uint32_t field = 0;
-	rte_be16_t eth_type;
-	uint8_t ip_proto;
 	struct dpaa2_key_extract *key_extract;
 	struct dpaa2_key_profile *key_profile;
+	uint8_t faf_byte = faf_bit_off / 8;
+	uint8_t faf_bit_in_byte = faf_bit_off % 8;
 
-	if (prev_proto->prot == NET_PROT_ETH) {
-		field = NH_FLD_ETH_TYPE;
-	} else if (prev_proto->prot == NET_PROT_IP) {
-		field = NH_FLD_IP_PROTO;
-	} else {
-		DPAA2_PMD_ERR("Prev proto(%d) not support!",
-			prev_proto->prot);
-		return -EINVAL;
-	}
+	faf_bit_in_byte = 7 - faf_bit_in_byte;
 
 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
 		key_extract = &priv->extract.qos_key_extract;
 		key_profile = &key_extract->key_profile;
 
 		offset = dpaa2_flow_extract_key_offset(key_profile,
-				prev_proto->prot, field);
+				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
 		if (offset < 0) {
 			DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
 			return -EINVAL;
 		}
 		key_addr = flow->qos_key_addr + offset;
 		mask_addr = flow->qos_mask_addr + offset;
-		if (prev_proto->prot == NET_PROT_ETH) {
-			eth_type = prev_proto->eth_type;
-			memcpy(key_addr, &eth_type, sizeof(rte_be16_t));
-			eth_type = 0xffff;
-			memcpy(mask_addr, &eth_type, sizeof(rte_be16_t));
-			flow->qos_rule_size += sizeof(rte_be16_t);
-		} else if (prev_proto->prot == NET_PROT_IP) {
-			ip_proto = prev_proto->ip_proto;
-			memcpy(key_addr, &ip_proto, sizeof(uint8_t));
-			ip_proto = 0xff;
-			memcpy(mask_addr, &ip_proto, sizeof(uint8_t));
-			flow->qos_rule_size += sizeof(uint8_t);
-		} else {
-			DPAA2_PMD_ERR("Invalid Prev proto(%d)",
-				prev_proto->prot);
-			return -EINVAL;
-		}
+
+		if (!(*key_addr) &&
+			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+			flow->qos_rule_size++;
+
+		*key_addr |=  (1 << faf_bit_in_byte);
+		*mask_addr |=  (1 << faf_bit_in_byte);
 	}
 
 	if (dist_type & DPAA2_FLOW_FS_TYPE) {
@@ -1153,7 +1245,7 @@ dpaa2_flow_prev_proto_rule(struct dpaa2_dev_priv *priv,
 		key_profile = &key_extract->key_profile;
 
 		offset = dpaa2_flow_extract_key_offset(key_profile,
-				prev_proto->prot, field);
+				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
 		if (offset < 0) {
 			DPAA2_PMD_ERR("%s TC[%d] key extract failed",
 				__func__, group);
@@ -1162,23 +1254,12 @@ dpaa2_flow_prev_proto_rule(struct dpaa2_dev_priv *priv,
 		key_addr = flow->fs_key_addr + offset;
 		mask_addr = flow->fs_mask_addr + offset;
 
-		if (prev_proto->prot == NET_PROT_ETH) {
-			eth_type = prev_proto->eth_type;
-			memcpy(key_addr, &eth_type, sizeof(rte_be16_t));
-			eth_type = 0xffff;
-			memcpy(mask_addr, &eth_type, sizeof(rte_be16_t));
-			flow->fs_rule_size += sizeof(rte_be16_t);
-		} else if (prev_proto->prot == NET_PROT_IP) {
-			ip_proto = prev_proto->ip_proto;
-			memcpy(key_addr, &ip_proto, sizeof(uint8_t));
-			ip_proto = 0xff;
-			memcpy(mask_addr, &ip_proto, sizeof(uint8_t));
-			flow->fs_rule_size += sizeof(uint8_t);
-		} else {
-			DPAA2_PMD_ERR("Invalid Prev proto(%d)",
-				prev_proto->prot);
-			return -EINVAL;
-		}
+		if (!(*key_addr) &&
+			key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+			flow->fs_rule_size++;
+
+		*key_addr |=  (1 << faf_bit_in_byte);
+		*mask_addr |=  (1 << faf_bit_in_byte);
 	}
 
 	return 0;
@@ -1200,7 +1281,7 @@ dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
 	}
 
 	offset = dpaa2_flow_extract_key_offset(key_profile,
-			prot, field);
+			DPAA2_NET_PROT_KEY, prot, field);
 	if (offset < 0) {
 		DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
 			prot, field);
@@ -1238,7 +1319,7 @@ dpaa2_flow_raw_rule_data_set(struct dpaa2_dev_flow *flow,
 	field = extract_offset << DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT;
 	field |= extract_size;
 	offset = dpaa2_flow_extract_key_offset(key_profile,
-			NET_PROT_PAYLOAD, field);
+			DPAA2_NET_PROT_KEY, NET_PROT_PAYLOAD, field);
 	if (offset < 0) {
 		DPAA2_PMD_ERR("offset(%d)/size(%d) raw extract failed",
 			extract_offset, size);
@@ -1321,60 +1402,39 @@ dpaa2_flow_extract_support(const uint8_t *mask_src,
 }
 
 static int
-dpaa2_flow_identify_by_prev_prot(struct dpaa2_dev_priv *priv,
+dpaa2_flow_identify_by_faf(struct dpaa2_dev_priv *priv,
 	struct dpaa2_dev_flow *flow,
-	const struct prev_proto_field_id *prev_prot,
+	enum dpaa2_rx_faf_offset faf_off,
 	enum dpaa2_flow_dist_type dist_type,
 	int group, int *recfg)
 {
-	int ret, index, local_cfg = 0, size = 0;
+	int ret, index, local_cfg = 0;
 	struct dpaa2_key_extract *extract;
 	struct dpaa2_key_profile *key_profile;
-	enum net_prot prot = prev_prot->prot;
-	uint32_t key_field = 0;
-
-	if (prot == NET_PROT_ETH) {
-		key_field = NH_FLD_ETH_TYPE;
-		size = sizeof(rte_be16_t);
-	} else if (prot == NET_PROT_IP) {
-		key_field = NH_FLD_IP_PROTO;
-		size = sizeof(uint8_t);
-	} else if (prot == NET_PROT_IPV4) {
-		prot = NET_PROT_IP;
-		key_field = NH_FLD_IP_PROTO;
-		size = sizeof(uint8_t);
-	} else if (prot == NET_PROT_IPV6) {
-		prot = NET_PROT_IP;
-		key_field = NH_FLD_IP_PROTO;
-		size = sizeof(uint8_t);
-	} else {
-		DPAA2_PMD_ERR("Invalid Prev prot(%d)", prot);
-		return -EINVAL;
-	}
+	uint8_t faf_byte = faf_off / 8;
 
 	if (dist_type & DPAA2_FLOW_QOS_TYPE) {
 		extract = &priv->extract.qos_key_extract;
 		key_profile = &extract->key_profile;
 
 		index = dpaa2_flow_extract_search(key_profile,
-				prot, key_field);
+				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
 		if (index < 0) {
-			ret = dpaa2_flow_extract_add_hdr(prot,
-					key_field, size, priv,
-					DPAA2_FLOW_QOS_TYPE, group,
+			ret = dpaa2_flow_faf_add_hdr(faf_byte,
+					priv, DPAA2_FLOW_QOS_TYPE, group,
 					NULL);
 			if (ret) {
-				DPAA2_PMD_ERR("QOS prev extract add failed");
+				DPAA2_PMD_ERR("QOS faf extract add failed");
 
 				return -EINVAL;
 			}
 			local_cfg |= DPAA2_FLOW_QOS_TYPE;
 		}
 
-		ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group,
+		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
 				DPAA2_FLOW_QOS_TYPE);
 		if (ret) {
-			DPAA2_PMD_ERR("QoS prev rule set failed");
+			DPAA2_PMD_ERR("QoS faf rule set failed");
 			return -EINVAL;
 		}
 	}
@@ -1384,14 +1444,13 @@ dpaa2_flow_identify_by_prev_prot(struct dpaa2_dev_priv *priv,
 		key_profile = &extract->key_profile;
 
 		index = dpaa2_flow_extract_search(key_profile,
-				prot, key_field);
+				DPAA2_FAF_KEY, NET_PROT_NONE, faf_byte);
 		if (index < 0) {
-			ret = dpaa2_flow_extract_add_hdr(prot,
-					key_field, size, priv,
-					DPAA2_FLOW_FS_TYPE, group,
+			ret = dpaa2_flow_faf_add_hdr(faf_byte,
+					priv, DPAA2_FLOW_FS_TYPE, group,
 					NULL);
 			if (ret) {
-				DPAA2_PMD_ERR("FS[%d] prev extract add failed",
+				DPAA2_PMD_ERR("FS[%d] faf extract add failed",
 					group);
 
 				return -EINVAL;
@@ -1399,17 +1458,17 @@ dpaa2_flow_identify_by_prev_prot(struct dpaa2_dev_priv *priv,
 			local_cfg |= DPAA2_FLOW_FS_TYPE;
 		}
 
-		ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group,
+		ret = dpaa2_flow_faf_add_rule(priv, flow, faf_off, group,
 				DPAA2_FLOW_FS_TYPE);
 		if (ret) {
-			DPAA2_PMD_ERR("FS[%d] prev rule set failed",
+			DPAA2_PMD_ERR("FS[%d] faf rule set failed",
 				group);
 			return -EINVAL;
 		}
 	}
 
 	if (recfg)
-		*recfg = local_cfg;
+		*recfg |= local_cfg;
 
 	return 0;
 }
@@ -1436,7 +1495,7 @@ dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
 	key_profile = &key_extract->key_profile;
 
 	index = dpaa2_flow_extract_search(key_profile,
-			prot, field);
+			DPAA2_NET_PROT_KEY, prot, field);
 	if (index < 0) {
 		ret = dpaa2_flow_extract_add_hdr(prot,
 				field, size, priv,
@@ -1575,6 +1634,7 @@ dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
 		key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
 	}
 	key_profile->num++;
+	key_profile->prot_field[num].type = DPAA2_NET_PROT_KEY;
 
 	dpkg->extracts[num].extract.from_hdr.prot = prot;
 	dpkg->extracts[num].extract.from_hdr.field = field;
@@ -1685,15 +1745,28 @@ dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
 	spec = pattern->spec;
 	mask = pattern->mask ?
 			pattern->mask : &dpaa2_flow_item_eth_mask;
-	if (!spec) {
-		DPAA2_PMD_WARN("No pattern spec for Eth flow");
-		return -EINVAL;
-	}
 
 	/* Get traffic class index and flow id to be configured */
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (!spec) {
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_ETH_FRAM, DPAA2_FLOW_QOS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_ETH_FRAM, DPAA2_FLOW_FS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
 		RTE_FLOW_ITEM_TYPE_ETH)) {
 		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
@@ -1782,15 +1855,18 @@ dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
 	flow->tc_index = attr->priority;
 
 	if (!spec) {
-		struct prev_proto_field_id prev_proto;
+		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
+						 DPAA2_FLOW_QOS_TYPE, group,
+						 &local_cfg);
+		if (ret)
+			return ret;
 
-		prev_proto.prot = NET_PROT_ETH;
-		prev_proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
-		ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_proto,
-				DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
-				group, &local_cfg);
+		ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_VLAN_FRAM,
+						 DPAA2_FLOW_FS_TYPE, group,
+						 &local_cfg);
 		if (ret)
 			return ret;
+
 		(*device_configured) |= local_cfg;
 		return 0;
 	}
@@ -1837,7 +1913,6 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	const void *key, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	int size;
-	struct prev_proto_field_id prev_prot;
 
 	group = attr->group;
 
@@ -1850,19 +1925,21 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
-	prev_prot.prot = NET_PROT_ETH;
-	prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
+					 DPAA2_FLOW_QOS_TYPE, group,
+					 &local_cfg);
+	if (ret)
+		return ret;
 
-	ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, group,
-			&local_cfg);
-	if (ret) {
-		DPAA2_PMD_ERR("IPv4 identification failed!");
+	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV4_FRAM,
+					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
+	if (ret)
 		return ret;
-	}
 
-	if (!spec_ipv4)
+	if (!spec_ipv4) {
+		(*device_configured) |= local_cfg;
 		return 0;
+	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
 				       RTE_FLOW_ITEM_TYPE_IPV4)) {
@@ -1954,7 +2031,6 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
 	int size;
-	struct prev_proto_field_id prev_prot;
 
 	group = attr->group;
 
@@ -1966,19 +2042,21 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
-	prev_prot.prot = NET_PROT_ETH;
-	prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
+					 DPAA2_FLOW_QOS_TYPE, group,
+					 &local_cfg);
+	if (ret)
+		return ret;
 
-	ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
-			group, &local_cfg);
-	if (ret) {
-		DPAA2_PMD_ERR("IPv6 identification failed!");
+	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
+					 DPAA2_FLOW_FS_TYPE, group, &local_cfg);
+	if (ret)
 		return ret;
-	}
 
-	if (!spec_ipv6)
+	if (!spec_ipv6) {
+		(*device_configured) |= local_cfg;
 		return 0;
+	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
 				       RTE_FLOW_ITEM_TYPE_IPV6)) {
@@ -2082,18 +2160,15 @@ dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	flow->tc_index = attr->priority;
 
 	if (!spec) {
-		/* Next proto of Generical IP is actually used
-		 * for ICMP identification.
-		 * Example: flow create 0 ingress pattern icmp
-		 */
-		struct prev_proto_field_id prev_proto;
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
 
-		prev_proto.prot = NET_PROT_IP;
-		prev_proto.ip_proto = IPPROTO_ICMP;
-		ret = dpaa2_flow_identify_by_prev_prot(priv,
-			flow, &prev_proto,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
-			group, &local_cfg);
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_ICMP_FRAM, DPAA2_FLOW_FS_TYPE,
+				group, &local_cfg);
 		if (ret)
 			return ret;
 
@@ -2170,22 +2245,21 @@ dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
-	if (!spec || !mc_l4_port_identification) {
-		struct prev_proto_field_id prev_proto;
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+	if (ret)
+		return ret;
 
-		prev_proto.prot = NET_PROT_IP;
-		prev_proto.ip_proto = IPPROTO_UDP;
-		ret = dpaa2_flow_identify_by_prev_prot(priv,
-			flow, &prev_proto,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_UDP_FRAM, DPAA2_FLOW_FS_TYPE,
 			group, &local_cfg);
-		if (ret)
-			return ret;
+	if (ret)
+		return ret;
 
+	if (!spec) {
 		(*device_configured) |= local_cfg;
-
-		if (!spec)
-			return 0;
+		return 0;
 	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
@@ -2257,22 +2331,21 @@ dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
-	if (!spec || !mc_l4_port_identification) {
-		struct prev_proto_field_id prev_proto;
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+	if (ret)
+		return ret;
 
-		prev_proto.prot = NET_PROT_IP;
-		prev_proto.ip_proto = IPPROTO_TCP;
-		ret = dpaa2_flow_identify_by_prev_prot(priv,
-			flow, &prev_proto,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_TCP_FRAM, DPAA2_FLOW_FS_TYPE,
 			group, &local_cfg);
-		if (ret)
-			return ret;
+	if (ret)
+		return ret;
 
+	if (!spec) {
 		(*device_configured) |= local_cfg;
-
-		if (!spec)
-			return 0;
+		return 0;
 	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
@@ -2344,22 +2417,21 @@ dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
-	if (!spec || !mc_l4_port_identification) {
-		struct prev_proto_field_id prev_proto;
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+	if (ret)
+		return ret;
 
-		prev_proto.prot = NET_PROT_IP;
-		prev_proto.ip_proto = IPPROTO_SCTP;
-		ret = dpaa2_flow_identify_by_prev_prot(priv,
-			flow, &prev_proto,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+	ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAF_SCTP_FRAM, DPAA2_FLOW_FS_TYPE,
 			group, &local_cfg);
-		if (ret)
-			return ret;
+	if (ret)
+		return ret;
 
+	if (!spec) {
 		(*device_configured) |= local_cfg;
-
-		if (!spec)
-			return 0;
+		return 0;
 	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
@@ -2432,21 +2504,20 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	flow->tc_index = attr->priority;
 
 	if (!spec) {
-		struct prev_proto_field_id prev_proto;
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
 
-		prev_proto.prot = NET_PROT_IP;
-		prev_proto.ip_proto = IPPROTO_GRE;
-		ret = dpaa2_flow_identify_by_prev_prot(priv,
-			flow, &prev_proto,
-			DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
-			group, &local_cfg);
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAF_GRE_FRAM, DPAA2_FLOW_FS_TYPE,
+				group, &local_cfg);
 		if (ret)
 			return ret;
 
 		(*device_configured) |= local_cfg;
-
-		if (!spec)
-			return 0;
+		return 0;
 	}
 
 	if (dpaa2_flow_extract_support((const uint8_t *)mask,
-- 
2.25.1


  parent reply	other threads:[~2024-09-13  6:04 UTC|newest]

Thread overview: 88+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` vanshika.shukla [this message]
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240913055959.3246917-28-vanshika.shukla@nxp.com \
    --to=vanshika.shukla@nxp.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jun.yang@nxp.com \
    --cc=sachin.saxena@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).