From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, Jun Yang <jun.yang@nxp.com>
Subject: [dpdk-dev] [PATCH v2 16/29] net/dpaa2: support key extracts of flow API
Date: Tue,  7 Jul 2020 14:52:31 +0530
Message-ID: <20200707092244.12791-17-hemant.agrawal@nxp.com>
In-Reply-To: <20200707092244.12791-1-hemant.agrawal@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

1) Support QoS extracts and TC extracts for multiple TCs.

2) The protocol type of the L2 extract is used to parse L3;
   the next-protocol field of the L3 extract is used to parse L4.

3) Use generic IP key extracts instead of separate IPv4 and IPv6 extracts.

4) Special handling for IP address extracts:
   Put the IP(v4/v6) address extract(s)/rule(s) at the end of the
   extracts array so that the remaining fields stay at fixed positions.
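
   As an illustration (the fields shown are just examples, not an
   exhaustive list), the reordered key layout is:

     [ETH_TYPE][VLAN_TCI][IP_PROTO][IP_SRC][IP_DST]
      <----- fixed offsets ----->   <-- kept last -->

   Adding a further non-address extract then shifts only the IP
   address offsets, which are tracked separately per key
   (ipv4/ipv6 src/dst offsets).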

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
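Notes (below the cut line, not part of the commit message): testpmd
commands along these lines exercise the new extracts; the port, queue
and address values are illustrative only:

  flow create 0 ingress pattern ipv6 / end actions queue index 1 / end
  flow create 0 ingress pattern eth / ipv4 src is 10.10.10.1 / end actions queue index 2 / end
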
 drivers/net/dpaa2/dpaa2_ethdev.c |   35 +-
 drivers/net/dpaa2/dpaa2_ethdev.h |   43 +-
 drivers/net/dpaa2/dpaa2_flow.c   | 3628 +++++++++++++++++++++---------
 3 files changed, 2665 insertions(+), 1041 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 8edd4b3cd..492b65840 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2020 NXP
  *
  */
 
@@ -2501,23 +2501,41 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
 	/*Init fields w.r.t. classficaition*/
-	memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+	memset(&priv->extract.qos_key_extract, 0,
+		sizeof(struct dpaa2_key_extract));
 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
 	if (!priv->extract.qos_extract_param) {
 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
 			    " classificaiton ", ret);
 		goto init_err;
 	}
+	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv6_src_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+
 	for (i = 0; i < MAX_TCS; i++) {
-		memset(&priv->extract.fs_key_cfg[i], 0,
-			sizeof(struct dpkg_profile_cfg));
-		priv->extract.fs_extract_param[i] =
+		memset(&priv->extract.tc_key_extract[i], 0,
+			sizeof(struct dpaa2_key_extract));
+		priv->extract.tc_extract_param[i] =
 			(size_t)rte_malloc(NULL, 256, 64);
-		if (!priv->extract.fs_extract_param[i]) {
+		if (!priv->extract.tc_extract_param[i]) {
 			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
 				     ret);
 			goto init_err;
 		}
+		priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
+			IP_ADDRESS_OFFSET_INVALID;
 	}
 
 	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
@@ -2593,8 +2611,9 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
 	rte_free(dpni);
 
 	for (i = 0; i < MAX_TCS; i++) {
-		if (priv->extract.fs_extract_param[i])
-			rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
+		if (priv->extract.tc_extract_param[i])
+			rte_free((void *)
+				(size_t)priv->extract.tc_extract_param[i]);
 	}
 
 	if (priv->extract.qos_extract_param)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c7fb6539f..030c625e3 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -96,10 +96,39 @@ extern enum pmd_dpaa2_ts dpaa2_enable_ts;
 #define DPAA2_QOS_TABLE_RECONFIGURE	1
 #define DPAA2_FS_TABLE_RECONFIGURE	2
 
+#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
+#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8
+
 /*Externaly defined*/
 extern const struct rte_flow_ops dpaa2_flow_ops;
 extern enum rte_filter_type dpaa2_filter_type;
 
+#define IP_ADDRESS_OFFSET_INVALID (-1)
+
+struct dpaa2_key_info {
+	uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
+	uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
+	/* Special for IP address. */
+	int ipv4_src_offset;
+	int ipv4_dst_offset;
+	int ipv6_src_offset;
+	int ipv6_dst_offset;
+	uint8_t key_total_size;
+};
+
+struct dpaa2_key_extract {
+	struct dpkg_profile_cfg dpkg;
+	struct dpaa2_key_info key_info;
+};
+
+struct extract_s {
+	struct dpaa2_key_extract qos_key_extract;
+	struct dpaa2_key_extract tc_key_extract[MAX_TCS];
+	uint64_t qos_extract_param;
+	uint64_t tc_extract_param[MAX_TCS];
+};
+
 struct dpaa2_dev_priv {
 	void *hw;
 	int32_t hw_id;
@@ -122,17 +151,9 @@ struct dpaa2_dev_priv {
 	uint8_t max_cgs;
 	uint8_t cgid_in_use[MAX_RX_QUEUES];
 
-	struct pattern_s {
-		uint8_t item_count;
-		uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS];
-	} pattern[MAX_TCS + 1];
-
-	struct extract_s {
-		struct dpkg_profile_cfg qos_key_cfg;
-		struct dpkg_profile_cfg fs_key_cfg[MAX_TCS];
-		uint64_t qos_extract_param;
-		uint64_t fs_extract_param[MAX_TCS];
-	} extract;
+	struct extract_s extract;
+	uint8_t *qos_index;
+	uint8_t *fs_index;
 
 	uint16_t ss_offset;
 	uint64_t ss_iova;
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 05d115c78..779cb64ab 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -1,5 +1,5 @@
-/* * SPDX-License-Identifier: BSD-3-Clause
- *   Copyright 2018 NXP
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2020 NXP
  */
 
 #include <sys/queue.h>
@@ -22,15 +22,44 @@
 #include <dpaa2_ethdev.h>
 #include <dpaa2_pmd_logs.h>
 
+/* Workaround to discriminate UDP/TCP/SCTP by the next-protocol
+ * field of L3, because MC/WRIOP cannot identify the L4 protocol
+ * from the L4 ports.
+ */
+int mc_l4_port_identification;
+
+enum flow_rule_ipaddr_type {
+	FLOW_NONE_IPADDR,
+	FLOW_IPV4_ADDR,
+	FLOW_IPV6_ADDR
+};
+
+struct flow_rule_ipaddr {
+	enum flow_rule_ipaddr_type ipaddr_type;
+	int qos_ipsrc_offset;
+	int qos_ipdst_offset;
+	int fs_ipsrc_offset;
+	int fs_ipdst_offset;
+};
+
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
-	struct dpni_rule_cfg rule;
+	struct dpni_rule_cfg qos_rule;
+	struct dpni_rule_cfg fs_rule;
+	uint16_t qos_index;
+	uint16_t fs_index;
 	uint8_t key_size;
-	uint8_t tc_id;
+	uint8_t tc_id; /**< Traffic Class ID. */
 	uint8_t flow_type;
-	uint8_t index;
+	uint8_t tc_index; /**< Index within this Traffic Class. */
 	enum rte_flow_action_type action;
 	uint16_t flow_id;
+	/* Special handling for IP addresses: records their offsets
+	 * within the key/mask.
+	 */
+	struct flow_rule_ipaddr ipaddr_rule;
+	struct dpni_fs_action_cfg action_cfg;
 };
 
 static const
@@ -54,166 +83,717 @@ enum rte_flow_action_type dpaa2_supported_action_type[] = {
 	RTE_FLOW_ACTION_TYPE_RSS
 };
 
+/* Max of enum rte_flow_item_type + 1, used for both IPv4 and IPv6 */
+#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
+
 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
 static const void *default_mask;
 
+static inline void dpaa2_flow_extract_key_set(
+	struct dpaa2_key_info *key_info, int index, uint8_t size)
+{
+	key_info->key_size[index] = size;
+	if (index > 0) {
+		key_info->key_offset[index] =
+			key_info->key_offset[index - 1] +
+			key_info->key_size[index - 1];
+	} else {
+		key_info->key_offset[index] = 0;
+	}
+	key_info->key_total_size += size;
+}
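+
+/* Example (hypothetical sizes): adding extracts of 2, 2 and 1 bytes
+ * in order yields key_offset[] = {0, 2, 4} and key_total_size = 5.
+ */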
+
+static int dpaa2_flow_extract_add(
+	struct dpaa2_key_extract *key_extract,
+	enum net_prot prot,
+	uint32_t field, uint8_t field_size)
+{
+	int index, ip_src = -1, ip_dst = -1;
+	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
+	struct dpaa2_key_info *key_info = &key_extract->key_info;
+
+	if (dpkg->num_extracts >=
+		DPKG_MAX_NUM_OF_EXTRACTS) {
+		DPAA2_PMD_WARN("Number of extracts overflows");
+		return -1;
+	}
+	/* Before reordering, the IP SRC and IP DST extracts are
+	 * already the last one(s).
+	 */
+	for (index = 0; index < dpkg->num_extracts; index++) {
+		if (dpkg->extracts[index].extract.from_hdr.prot ==
+			NET_PROT_IP) {
+			if (dpkg->extracts[index].extract.from_hdr.field ==
+				NH_FLD_IP_SRC) {
+				ip_src = index;
+			}
+			if (dpkg->extracts[index].extract.from_hdr.field ==
+				NH_FLD_IP_DST) {
+				ip_dst = index;
+			}
+		}
+	}
+
+	if (ip_src >= 0)
+		RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
+
+	if (ip_dst >= 0)
+		RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
+
+	if (prot == NET_PROT_IP &&
+		(field == NH_FLD_IP_SRC ||
+		field == NH_FLD_IP_DST)) {
+		index = dpkg->num_extracts;
+	} else {
+		if (ip_src >= 0 && ip_dst >= 0)
+			index = dpkg->num_extracts - 2;
+		else if (ip_src >= 0 || ip_dst >= 0)
+			index = dpkg->num_extracts - 1;
+		else
+			index = dpkg->num_extracts;
+	}
+
+	dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
+	dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
+	dpkg->extracts[index].extract.from_hdr.prot = prot;
+	dpkg->extracts[index].extract.from_hdr.field = field;
+	if (prot == NET_PROT_IP &&
+		(field == NH_FLD_IP_SRC ||
+		field == NH_FLD_IP_DST)) {
+		dpaa2_flow_extract_key_set(key_info, index, 0);
+	} else {
+		dpaa2_flow_extract_key_set(key_info, index, field_size);
+	}
+
+	if (prot == NET_PROT_IP) {
+		if (field == NH_FLD_IP_SRC) {
+			if (key_info->ipv4_dst_offset >= 0) {
+				key_info->ipv4_src_offset =
+					key_info->ipv4_dst_offset +
+					NH_FLD_IPV4_ADDR_SIZE;
+			} else {
+				key_info->ipv4_src_offset =
+					key_info->key_offset[index - 1] +
+						key_info->key_size[index - 1];
+			}
+			if (key_info->ipv6_dst_offset >= 0) {
+				key_info->ipv6_src_offset =
+					key_info->ipv6_dst_offset +
+					NH_FLD_IPV6_ADDR_SIZE;
+			} else {
+				key_info->ipv6_src_offset =
+					key_info->key_offset[index - 1] +
+						key_info->key_size[index - 1];
+			}
+		} else if (field == NH_FLD_IP_DST) {
+			if (key_info->ipv4_src_offset >= 0) {
+				key_info->ipv4_dst_offset =
+					key_info->ipv4_src_offset +
+					NH_FLD_IPV4_ADDR_SIZE;
+			} else {
+				key_info->ipv4_dst_offset =
+					key_info->key_offset[index - 1] +
+						key_info->key_size[index - 1];
+			}
+			if (key_info->ipv6_src_offset >= 0) {
+				key_info->ipv6_dst_offset =
+					key_info->ipv6_src_offset +
+					NH_FLD_IPV6_ADDR_SIZE;
+			} else {
+				key_info->ipv6_dst_offset =
+					key_info->key_offset[index - 1] +
+						key_info->key_size[index - 1];
+			}
+		}
+	}
+
+	if (index == dpkg->num_extracts) {
+		dpkg->num_extracts++;
+		return 0;
+	}
+
+	if (ip_src >= 0) {
+		ip_src++;
+		dpkg->extracts[ip_src].type =
+			DPKG_EXTRACT_FROM_HDR;
+		dpkg->extracts[ip_src].extract.from_hdr.type =
+			DPKG_FULL_FIELD;
+		dpkg->extracts[ip_src].extract.from_hdr.prot =
+			NET_PROT_IP;
+		dpkg->extracts[ip_src].extract.from_hdr.field =
+			NH_FLD_IP_SRC;
+		dpaa2_flow_extract_key_set(key_info, ip_src, 0);
+		key_info->ipv4_src_offset += field_size;
+		key_info->ipv6_src_offset += field_size;
+	}
+	if (ip_dst >= 0) {
+		ip_dst++;
+		dpkg->extracts[ip_dst].type =
+			DPKG_EXTRACT_FROM_HDR;
+		dpkg->extracts[ip_dst].extract.from_hdr.type =
+			DPKG_FULL_FIELD;
+		dpkg->extracts[ip_dst].extract.from_hdr.prot =
+			NET_PROT_IP;
+		dpkg->extracts[ip_dst].extract.from_hdr.field =
+			NH_FLD_IP_DST;
+		dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
+		key_info->ipv4_dst_offset += field_size;
+		key_info->ipv6_dst_offset += field_size;
+	}
+
+	dpkg->num_extracts++;
+
+	return 0;
+}
+
+/* Protocol discrimination.
+ * Discriminate IPv4/IPv6/vLan by Eth type.
+ * Discriminate UDP/TCP/ICMP by next proto of IP.
+ */
+static inline int
+dpaa2_flow_proto_discrimination_extract(
+	struct dpaa2_key_extract *key_extract,
+	enum rte_flow_item_type type)
+{
+	if (type == RTE_FLOW_ITEM_TYPE_ETH) {
+		return dpaa2_flow_extract_add(
+				key_extract, NET_PROT_ETH,
+				NH_FLD_ETH_TYPE,
+				sizeof(rte_be16_t));
+	} else if (type == (enum rte_flow_item_type)
+		DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
+		return dpaa2_flow_extract_add(
+				key_extract, NET_PROT_IP,
+				NH_FLD_IP_PROTO,
+				NH_FLD_IP_PROTO_SIZE);
+	}
+
+	return -1;
+}
+
+static inline int dpaa2_flow_extract_search(
+	struct dpkg_profile_cfg *dpkg,
+	enum net_prot prot, uint32_t field)
+{
+	int i;
+
+	for (i = 0; i < dpkg->num_extracts; i++) {
+		if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
+			dpkg->extracts[i].extract.from_hdr.field == field) {
+			return i;
+		}
+	}
+
+	return -1;
+}
+
+static inline int dpaa2_flow_extract_key_offset(
+	struct dpaa2_key_extract *key_extract,
+	enum net_prot prot, uint32_t field)
+{
+	int i;
+	struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
+	struct dpaa2_key_info *key_info = &key_extract->key_info;
+
+	if (prot == NET_PROT_IPV4 ||
+		prot == NET_PROT_IPV6)
+		i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
+	else
+		i = dpaa2_flow_extract_search(dpkg, prot, field);
+
+	if (i >= 0) {
+		if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
+			return key_info->ipv4_src_offset;
+		else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
+			return key_info->ipv4_dst_offset;
+		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
+			return key_info->ipv6_src_offset;
+		else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
+			return key_info->ipv6_dst_offset;
+		else
+			return key_info->key_offset[i];
+	} else {
+		return -1;
+	}
+}
+
+struct proto_discrimination {
+	enum rte_flow_item_type type;
+	union {
+		rte_be16_t eth_type;
+		uint8_t ip_proto;
+	};
+};
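+
+/* Example (mirroring dpaa2_configure_flow_vlan() below): set
+ * .type = RTE_FLOW_ITEM_TYPE_ETH and
+ * .eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)
+ * to discriminate vlan frames by their Eth type.
+ */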
+
+static int
+dpaa2_flow_proto_discrimination_rule(
+	struct dpaa2_dev_priv *priv, struct rte_flow *flow,
+	struct proto_discrimination proto, int group)
+{
+	enum net_prot prot;
+	uint32_t field;
+	int offset;
+	size_t key_iova;
+	size_t mask_iova;
+	rte_be16_t eth_type;
+	uint8_t ip_proto;
+
+	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
+		prot = NET_PROT_ETH;
+		field = NH_FLD_ETH_TYPE;
+	} else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
+		prot = NET_PROT_IP;
+		field = NH_FLD_IP_PROTO;
+	} else {
+		DPAA2_PMD_ERR(
+			"Only Eth and IP support next protocol discrimination.");
+		return -1;
+	}
+
+	offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
+			prot, field);
+	if (offset < 0) {
+		DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
+				prot, field);
+		return -1;
+	}
+	key_iova = flow->qos_rule.key_iova + offset;
+	mask_iova = flow->qos_rule.mask_iova + offset;
+	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_type = proto.eth_type;
+		memcpy((void *)key_iova, (const void *)(&eth_type),
+			sizeof(rte_be16_t));
+		eth_type = 0xffff;
+		memcpy((void *)mask_iova, (const void *)(&eth_type),
+			sizeof(rte_be16_t));
+	} else {
+		ip_proto = proto.ip_proto;
+		memcpy((void *)key_iova, (const void *)(&ip_proto),
+			sizeof(uint8_t));
+		ip_proto = 0xff;
+		memcpy((void *)mask_iova, (const void *)(&ip_proto),
+			sizeof(uint8_t));
+	}
+
+	offset = dpaa2_flow_extract_key_offset(
+			&priv->extract.tc_key_extract[group],
+			prot, field);
+	if (offset < 0) {
+		DPAA2_PMD_ERR("FS prot %d field %d extract failed",
+				prot, field);
+		return -1;
+	}
+	key_iova = flow->fs_rule.key_iova + offset;
+	mask_iova = flow->fs_rule.mask_iova + offset;
+
+	if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_type = proto.eth_type;
+		memcpy((void *)key_iova, (const void *)(&eth_type),
+			sizeof(rte_be16_t));
+		eth_type = 0xffff;
+		memcpy((void *)mask_iova, (const void *)(&eth_type),
+			sizeof(rte_be16_t));
+	} else {
+		ip_proto = proto.ip_proto;
+		memcpy((void *)key_iova, (const void *)(&ip_proto),
+			sizeof(uint8_t));
+		ip_proto = 0xff;
+		memcpy((void *)mask_iova, (const void *)(&ip_proto),
+			sizeof(uint8_t));
+	}
+
+	return 0;
+}
+
+static inline int
+dpaa2_flow_rule_data_set(
+	struct dpaa2_key_extract *key_extract,
+	struct dpni_rule_cfg *rule,
+	enum net_prot prot, uint32_t field,
+	const void *key, const void *mask, int size)
+{
+	int offset = dpaa2_flow_extract_key_offset(key_extract,
+				prot, field);
+
+	if (offset < 0) {
+		DPAA2_PMD_ERR("prot %d, field %d extract failed",
+			prot, field);
+		return -1;
+	}
+	memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
+	memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
+
+	return 0;
+}
+
+static inline int
+_dpaa2_flow_rule_move_ipaddr_tail(
+	struct dpaa2_key_extract *key_extract,
+	struct dpni_rule_cfg *rule, int src_offset,
+	uint32_t field, bool ipv4)
+{
+	size_t key_src;
+	size_t mask_src;
+	size_t key_dst;
+	size_t mask_dst;
+	int dst_offset, len;
+	enum net_prot prot;
+	char tmp[NH_FLD_IPV6_ADDR_SIZE];
+
+	if (field != NH_FLD_IP_SRC &&
+		field != NH_FLD_IP_DST) {
+		DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
+		return -1;
+	}
+	if (ipv4)
+		prot = NET_PROT_IPV4;
+	else
+		prot = NET_PROT_IPV6;
+	dst_offset = dpaa2_flow_extract_key_offset(key_extract,
+				prot, field);
+	if (dst_offset < 0) {
+		DPAA2_PMD_ERR("Field %d reorder extract failed", field);
+		return -1;
+	}
+	key_src = rule->key_iova + src_offset;
+	mask_src = rule->mask_iova + src_offset;
+	key_dst = rule->key_iova + dst_offset;
+	mask_dst = rule->mask_iova + dst_offset;
+	if (ipv4)
+		len = sizeof(rte_be32_t);
+	else
+		len = NH_FLD_IPV6_ADDR_SIZE;
+
+	memcpy(tmp, (char *)key_src, len);
+	memcpy((char *)key_dst, tmp, len);
+
+	memcpy(tmp, (char *)mask_src, len);
+	memcpy((char *)mask_dst, tmp, len);
+
+	return 0;
+}
+
+static inline int
+dpaa2_flow_rule_move_ipaddr_tail(
+	struct rte_flow *flow, struct dpaa2_dev_priv *priv,
+	int fs_group)
+{
+	int ret;
+	enum net_prot prot;
+
+	if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
+		return 0;
+
+	if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
+		prot = NET_PROT_IPV4;
+	else
+		prot = NET_PROT_IPV6;
+
+	if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
+		ret = _dpaa2_flow_rule_move_ipaddr_tail(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				flow->ipaddr_rule.qos_ipsrc_offset,
+				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS src address reorder failed");
+			return -1;
+		}
+		flow->ipaddr_rule.qos_ipsrc_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.qos_key_extract,
+				prot, NH_FLD_IP_SRC);
+	}
+
+	if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
+		ret = _dpaa2_flow_rule_move_ipaddr_tail(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				flow->ipaddr_rule.qos_ipdst_offset,
+				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS dst address reorder failed");
+			return -1;
+		}
+		flow->ipaddr_rule.qos_ipdst_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.qos_key_extract,
+				prot, NH_FLD_IP_DST);
+	}
+
+	if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
+		ret = _dpaa2_flow_rule_move_ipaddr_tail(
+				&priv->extract.tc_key_extract[fs_group],
+				&flow->fs_rule,
+				flow->ipaddr_rule.fs_ipsrc_offset,
+				NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
+		if (ret) {
+			DPAA2_PMD_ERR("FS src address reorder failed");
+			return -1;
+		}
+		flow->ipaddr_rule.fs_ipsrc_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.tc_key_extract[fs_group],
+				prot, NH_FLD_IP_SRC);
+	}
+	if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
+		ret = _dpaa2_flow_rule_move_ipaddr_tail(
+				&priv->extract.tc_key_extract[fs_group],
+				&flow->fs_rule,
+				flow->ipaddr_rule.fs_ipdst_offset,
+				NH_FLD_IP_DST, prot == NET_PROT_IPV4);
+		if (ret) {
+			DPAA2_PMD_ERR("FS dst address reorder failed");
+			return -1;
+		}
+		flow->ipaddr_rule.fs_ipdst_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.tc_key_extract[fs_group],
+				prot, NH_FLD_IP_DST);
+	}
+
+	return 0;
+}
+
 static int
 dpaa2_configure_flow_eth(struct rte_flow *flow,
 			 struct rte_eth_dev *dev,
 			 const struct rte_flow_attr *attr,
 			 const struct rte_flow_item *pattern,
 			 const struct rte_flow_action actions[] __rte_unused,
-			 struct rte_flow_error *error __rte_unused)
+			 struct rte_flow_error *error __rte_unused,
+			 int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_eth *spec, *mask;
 
 	/* TODO: Currently upper bound of range parameter is not implemented */
 	const struct rte_flow_item_eth *last __rte_unused;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	/* TODO: pattern is an array of 9 elements where 9th pattern element */
-	/* is for QoS table and 1-8th pattern element is for FS tables. */
-	/* It can be changed to macro. */
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_eth *)pattern->spec;
+	last    = (const struct rte_flow_item_eth *)pattern->last;
+	mask    = (const struct rte_flow_item_eth *)
+		(pattern->mask ? pattern->mask : default_mask);
+	if (!spec) {
+		/* No field of the eth header is matched;
+		 * only the eth protocol matters.
+		 */
+		DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
+		return 0;
 	}
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_SA);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_ETH, NH_FLD_ETH_SA,
+					RTE_ETHER_ADDR_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_SA);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_ETH, NH_FLD_ETH_SA,
+					RTE_ETHER_ADDR_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before ETH_SA rule set failed");
+			return -1;
+		}
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_SA,
+				&spec->src.addr_bytes,
+				&mask->src.addr_bytes,
+				sizeof(struct rte_ether_addr));
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
+			return -1;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_SA,
+				&spec->src.addr_bytes,
+				&mask->src.addr_bytes,
+				sizeof(struct rte_ether_addr));
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+	if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_DA);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_ETH, NH_FLD_ETH_DA,
+					RTE_ETHER_ADDR_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_DA);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_ETH, NH_FLD_ETH_DA,
+					RTE_ETHER_ADDR_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before ETH DA rule set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_DA,
+				&spec->dst.addr_bytes,
+				&mask->dst.addr_bytes,
+				sizeof(struct rte_ether_addr));
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_DA,
+				&spec->dst.addr_bytes,
+				&mask->dst.addr_bytes,
+				sizeof(struct rte_ether_addr));
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_eth *)pattern->spec;
-	last	= (const struct rte_flow_item_eth *)pattern->last;
-	mask	= (const struct rte_flow_item_eth *)
-			(pattern->mask ? pattern->mask : default_mask);
+	if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_ETH, NH_FLD_ETH_TYPE,
+					RTE_ETHER_TYPE_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
 
-	/* Key rule */
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
-						sizeof(struct rte_ether_addr));
-	key_iova += sizeof(struct rte_ether_addr);
-	memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
-						sizeof(struct rte_ether_addr));
-	key_iova += sizeof(struct rte_ether_addr);
-	memcpy((void *)key_iova, (const void *)(&spec->type),
-						sizeof(rte_be16_t));
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_ETH, NH_FLD_ETH_TYPE,
+					RTE_ETHER_TYPE_LEN);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
 
-	/* Key mask */
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
-						sizeof(struct rte_ether_addr));
-	mask_iova += sizeof(struct rte_ether_addr);
-	memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
-						sizeof(struct rte_ether_addr));
-	mask_iova += sizeof(struct rte_ether_addr);
-	memcpy((void *)mask_iova, (const void *)(&mask->type),
-						sizeof(rte_be16_t));
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before ETH TYPE rule set failed");
+			return -1;
+		}
 
-	flow->key_size += ((2  * sizeof(struct rte_ether_addr)) +
-					sizeof(rte_be16_t));
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_TYPE,
+				&spec->type,
+				&mask->type,
+				sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
+			return -1;
+		}
 
-	return device_configured;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_ETH,
+				NH_FLD_ETH_TYPE,
+				&spec->type,
+				&mask->type,
+				sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
+			return -1;
+		}
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
 }
 
 static int
@@ -222,12 +802,11 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
 			  const struct rte_flow_attr *attr,
 			  const struct rte_flow_item *pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
+			  struct rte_flow_error *error __rte_unused,
+			  int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_vlan *spec, *mask;
 
@@ -236,375 +815,524 @@ dpaa2_configure_flow_vlan(struct rte_flow *flow,
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/*  more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_vlan *)pattern->spec;
+	last    = (const struct rte_flow_item_vlan *)pattern->last;
+	mask    = (const struct rte_flow_item_vlan *)
+		(pattern->mask ? pattern->mask : default_mask);
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		/* No field of the vlan header is matched;
+		 * only the vlan protocol matters.
+		 * The Eth type is actually used for vlan classification.
+		 */
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+						&priv->extract.qos_key_extract,
+						RTE_FLOW_ITEM_TYPE_ETH);
+			if (ret) {
+				DPAA2_PMD_ERR(
+				"QoS Ext ETH_TYPE to discriminate vLan failed");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.tc_key_extract[group],
+					RTE_FLOW_ITEM_TYPE_ETH);
+			if (ret) {
+				DPAA2_PMD_ERR(
+				"FS Ext ETH_TYPE to discriminate vLan failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+			"Move ipaddr before vLan discrimination set failed");
+			return -1;
+		}
+
+		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
+		proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("vLan discrimination rule set failed");
+			return -1;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+		(*device_configured) |= local_cfg;
+
+		return 0;
 	}
 
+	if (!mask->tci)
+		return 0;
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
+	index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+	if (index < 0) {
+		ret = dpaa2_flow_extract_add(
+						&priv->extract.qos_key_extract,
+						NET_PROT_VLAN,
+						NH_FLD_VLAN_TCI,
+						sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
 
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
-		priv->extract.qos_key_cfg.num_extracts++;
+			return -1;
+		}
+		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+	}
+
+	index = dpaa2_flow_extract_search(
+			&priv->extract.tc_key_extract[group].dpkg,
+			NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+	if (index < 0) {
+		ret = dpaa2_flow_extract_add(
+				&priv->extract.tc_key_extract[group],
+				NET_PROT_VLAN,
+				NH_FLD_VLAN_TCI,
+				sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
+
+			return -1;
+		}
+		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 	}
 
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
-		priv->extract.fs_key_cfg[group].num_extracts++;
+	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+	if (ret) {
+		DPAA2_PMD_ERR(
+			"Move ipaddr before VLAN TCI rule set failed");
+		return -1;
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_vlan *)pattern->spec;
-	last	= (const struct rte_flow_item_vlan *)pattern->last;
-	mask	= (const struct rte_flow_item_vlan *)
-			(pattern->mask ? pattern->mask : default_mask);
+	ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_VLAN,
+				NH_FLD_VLAN_TCI,
+				&spec->tci,
+				&mask->tci,
+				sizeof(rte_be16_t));
+	if (ret) {
+		DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
+		return -1;
+	}
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(&spec->tci),
-							sizeof(rte_be16_t));
+	ret = dpaa2_flow_rule_data_set(
+			&priv->extract.tc_key_extract[group],
+			&flow->fs_rule,
+			NET_PROT_VLAN,
+			NH_FLD_VLAN_TCI,
+			&spec->tci,
+			&mask->tci,
+			sizeof(rte_be16_t));
+	if (ret) {
+		DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
+		return -1;
+	}
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(&mask->tci),
-							sizeof(rte_be16_t));
+	(*device_configured) |= local_cfg;
 
-	flow->key_size += sizeof(rte_be16_t);
-	return device_configured;
+	return 0;
 }
 
 static int
-dpaa2_configure_flow_ipv4(struct rte_flow *flow,
-			  struct rte_eth_dev *dev,
-			  const struct rte_flow_attr *attr,
-			  const struct rte_flow_item *pattern,
-			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
+dpaa2_configure_flow_generic_ip(
+	struct rte_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item *pattern,
+	const struct rte_flow_action actions[] __rte_unused,
+	struct rte_flow_error *error __rte_unused,
+	int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
-	const struct rte_flow_item_ipv4 *spec, *mask;
+	const struct rte_flow_item_ipv4 *spec_ipv4 = NULL,
+		*mask_ipv4 = NULL;
+	const struct rte_flow_item_ipv6 *spec_ipv6 = NULL,
+		*mask_ipv6 = NULL;
+	const void *key, *mask;
+	enum net_prot prot;
 
-	const struct rte_flow_item_ipv4 *last __rte_unused;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
+	int size;
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
+	/* Parse pattern list to get the matching parameters */
+	if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+		spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
+		mask_ipv4 = (const struct rte_flow_item_ipv4 *)
+			(pattern->mask ? pattern->mask : default_mask);
+	} else {
+		spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
+		mask_ipv6 = (const struct rte_flow_item_ipv6 *)
+			(pattern->mask ? pattern->mask : default_mask);
 	}
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec_ipv4 && !spec_ipv6) {
+		/* No field of the IP header is matched;
+		 * only the IP protocol matters.
+		 * Example: flow create 0 ingress pattern ipv6 /
+		 * The Eth type is actually used for IP identification.
+		 */
+		/* TODO: the current design only supports Eth + IP;
+		 * Eth + vLan + IP still needs to be added.
+		 */
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					RTE_FLOW_ITEM_TYPE_ETH);
+			if (ret) {
+				DPAA2_PMD_ERR(
+				"QoS Ext ETH_TYPE to discriminate IP failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ETH, NH_FLD_ETH_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.tc_key_extract[group],
+					RTE_FLOW_ITEM_TYPE_ETH);
+			if (ret) {
+				DPAA2_PMD_ERR(
+				"FS Ext ETH_TYPE to discriminate IP failed");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
-	}
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+			"Move ipaddr before IP discrimination set failed");
+			return -1;
+		}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
-	}
+		proto.type = RTE_FLOW_ITEM_TYPE_ETH;
+		if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
+			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		else
+			proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("IP discrimination rule set failed");
+			return -1;
+		}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_ipv4 *)pattern->spec;
-	last	= (const struct rte_flow_item_ipv4 *)pattern->last;
-	mask	= (const struct rte_flow_item_ipv4 *)
-			(pattern->mask ? pattern->mask : default_mask);
+		(*device_configured) |= local_cfg;
+
+		return 0;
+	}
+
+	if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
+		mask_ipv4->hdr.dst_addr)) {
+		flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
+	} else if (mask_ipv6 &&
+		(memcmp((const char *)mask_ipv6->hdr.src_addr,
+				zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
+		memcmp((const char *)mask_ipv6->hdr.dst_addr,
+				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+		flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
+	}
+
+	if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
+		(mask_ipv6 &&
+			memcmp((const char *)mask_ipv6->hdr.src_addr,
+				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+						&priv->extract.qos_key_extract,
+						NET_PROT_IP,
+						NH_FLD_IP_SRC,
+						0);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
-							sizeof(uint32_t));
-	key_iova += sizeof(uint32_t);
-	memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
-							sizeof(uint32_t));
-	key_iova += sizeof(uint32_t);
-	memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
-							sizeof(uint8_t));
-
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
-							sizeof(uint32_t));
-	mask_iova += sizeof(uint32_t);
-	memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
-							sizeof(uint32_t));
-	mask_iova += sizeof(uint32_t);
-	memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
-							sizeof(uint8_t));
-
-	flow->key_size += (2 * sizeof(uint32_t)) + sizeof(uint8_t);
-	return device_configured;
-}
+				return -1;
+			}
+			local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
+				DPAA2_QOS_TABLE_IPADDR_EXTRACT);
+		}
 
-static int
-dpaa2_configure_flow_ipv6(struct rte_flow *flow,
-			  struct rte_eth_dev *dev,
-			  const struct rte_flow_attr *attr,
-			  const struct rte_flow_item *pattern,
-			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
-{
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
-	uint32_t group;
-	const struct rte_flow_item_ipv6 *spec, *mask;
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_IP,
+					NH_FLD_IP_SRC,
+					0);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
 
-	const struct rte_flow_item_ipv6 *last __rte_unused;
-	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+				return -1;
+			}
+			local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
+				DPAA2_FS_TABLE_IPADDR_EXTRACT);
+		}
 
-	group = attr->group;
+		if (spec_ipv4)
+			key = &spec_ipv4->hdr.src_addr;
+		else
+			key = &spec_ipv6->hdr.src_addr[0];
+		if (mask_ipv4) {
+			mask = &mask_ipv4->hdr.src_addr;
+			size = NH_FLD_IPV4_ADDR_SIZE;
+			prot = NET_PROT_IPV4;
+		} else {
+			mask = &mask_ipv6->hdr.src_addr[0];
+			size = NH_FLD_IPV6_ADDR_SIZE;
+			prot = NET_PROT_IPV6;
+		}
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				prot, NH_FLD_IP_SRC,
+				key, mask, size);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
+			return -1;
+		}
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				prot, NH_FLD_IP_SRC,
+				key, mask, size);
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
+			return -1;
+		}
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+		flow->ipaddr_rule.qos_ipsrc_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.qos_key_extract,
+				prot, NH_FLD_IP_SRC);
+		flow->ipaddr_rule.fs_ipsrc_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.tc_key_extract[group],
+				prot, NH_FLD_IP_SRC);
+	}
+
+	if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
+		(mask_ipv6 &&
+			memcmp((const char *)mask_ipv6->hdr.dst_addr,
+				zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_DST);
+		if (index < 0) {
+			if (mask_ipv4)
+				size = NH_FLD_IPV4_ADDR_SIZE;
+			else
+				size = NH_FLD_IPV6_ADDR_SIZE;
+			ret = dpaa2_flow_extract_add(
+						&priv->extract.qos_key_extract,
+						NET_PROT_IP,
+						NH_FLD_IP_DST,
+						size);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
+				DPAA2_QOS_TABLE_IPADDR_EXTRACT);
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_DST);
+		if (index < 0) {
+			if (mask_ipv4)
+				size = NH_FLD_IPV4_ADDR_SIZE;
+			else
+				size = NH_FLD_IPV6_ADDR_SIZE;
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_IP,
+					NH_FLD_IP_DST,
+					size);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
+				return -1;
+			}
+			local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
+				DPAA2_FS_TABLE_IPADDR_EXTRACT);
+		}
+
+		if (spec_ipv4)
+			key = &spec_ipv4->hdr.dst_addr;
+		else
+			key = spec_ipv6->hdr.dst_addr;
+		if (mask_ipv4) {
+			mask = &mask_ipv4->hdr.dst_addr;
+			size = NH_FLD_IPV4_ADDR_SIZE;
+			prot = NET_PROT_IPV4;
 		} else {
-			entry_found = 1;
-			break;
+			mask = &mask_ipv6->hdr.dst_addr[0];
+			size = NH_FLD_IPV6_ADDR_SIZE;
+			prot = NET_PROT_IPV6;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
-	}
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				prot, NH_FLD_IP_DST,
+				key, mask, size);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
+			return -1;
+		}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				prot, NH_FLD_IP_DST,
+				key, mask, size);
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
+			return -1;
+		}
+		flow->ipaddr_rule.qos_ipdst_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.qos_key_extract,
+				prot, NH_FLD_IP_DST);
+		flow->ipaddr_rule.fs_ipdst_offset =
+			dpaa2_flow_extract_key_offset(
+				&priv->extract.tc_key_extract[group],
+				prot, NH_FLD_IP_DST);
+	}
+
+	if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
+		(mask_ipv6 && mask_ipv6->hdr.proto)) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+				&priv->extract.qos_key_extract,
+				NET_PROT_IP,
+				NH_FLD_IP_PROTO,
+				NH_FLD_IP_PROTO_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_IP,
+					NH_FLD_IP_PROTO,
+					NH_FLD_IP_PROTO_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before NH_FLD_IP_PROTO rule set failed");
+			return -1;
+		}
+
+		if (spec_ipv4)
+			key = &spec_ipv4->hdr.next_proto_id;
+		else
+			key = &spec_ipv6->hdr.proto;
+		if (mask_ipv4)
+			mask = &mask_ipv4->hdr.next_proto_id;
+		else
+			mask = &mask_ipv6->hdr.proto;
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_IP,
+				NH_FLD_IP_PROTO,
+				key, mask, NH_FLD_IP_PROTO_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_IP,
+				NH_FLD_IP_PROTO,
+				key, mask, NH_FLD_IP_PROTO_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_ipv6 *)pattern->spec;
-	last	= (const struct rte_flow_item_ipv6 *)pattern->last;
-	mask	= (const struct rte_flow_item_ipv6 *)
-			(pattern->mask ? pattern->mask : default_mask);
+	(*device_configured) |= local_cfg;
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
-						sizeof(spec->hdr.src_addr));
-	key_iova += sizeof(spec->hdr.src_addr);
-	memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
-						sizeof(spec->hdr.dst_addr));
-
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
-						sizeof(mask->hdr.src_addr));
-	mask_iova += sizeof(mask->hdr.src_addr);
-	memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
-						sizeof(mask->hdr.dst_addr));
-
-	flow->key_size += sizeof(spec->hdr.src_addr) +
-					sizeof(mask->hdr.dst_addr);
-	return device_configured;
+	return 0;
 }
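
Each field handled by these configure functions follows the same three-step idiom: search the current extract sequence for the protocol/field pair, append the extract if it is missing, and mark the affected table for reconfiguration. A minimal sketch of that idiom as a wrapper, assuming the dpaa2_flow_extract_search()/dpaa2_flow_extract_add() signatures used in this patch (the wrapper name and parameter types are illustrative only, not part of the patch):

	static int
	dpaa2_flow_extract_add_once(struct dpaa2_key_extract *key_extract,
			enum net_prot prot, uint32_t field,
			uint8_t field_size, int reconf_flag, int *local_cfg)
	{
		/* Reuse an existing extract for this protocol/field pair. */
		int index = dpaa2_flow_extract_search(&key_extract->dpkg,
						prot, field);

		if (index >= 0)
			return 0;

		/* Not present yet: append it and record the layout change. */
		if (dpaa2_flow_extract_add(key_extract, prot, field, field_size))
			return -1;

		*local_cfg |= reconf_flag;
		return 0;
	}

With such a helper, each QoS/FS pair of search-and-add blocks would collapse to two calls, one passing DPAA2_QOS_TABLE_RECONFIGURE and one passing DPAA2_FS_TABLE_RECONFIGURE.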
 
 static int
@@ -613,12 +1341,11 @@ dpaa2_configure_flow_icmp(struct rte_flow *flow,
 			  const struct rte_flow_attr *attr,
 			  const struct rte_flow_item *pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
+			  struct rte_flow_error *error __rte_unused,
+			  int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_icmp *spec, *mask;
 
@@ -627,116 +1354,220 @@ dpaa2_configure_flow_icmp(struct rte_flow *flow,
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_icmp *)pattern->spec;
+	last    = (const struct rte_flow_item_icmp *)pattern->last;
+	mask    = (const struct rte_flow_item_icmp *)
+		(pattern->mask ? pattern->mask : default_mask);
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		/* No field of the ICMP header is specified,
+		 * so match only on the ICMP protocol itself.
+		 * Example: flow create 0 ingress pattern icmp /
+		 */
+		/* The next-protocol field of the generic IP extract
+		 * is used to identify ICMP.
+		 */
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"QoS Extract IP protocol to discriminate ICMP failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.tc_key_extract[group],
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"FS Extract IP protocol to discriminate ICMP failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move IP addr before ICMP discrimination set failed");
+			return -1;
+		}
+
+		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+		proto.ip_proto = IPPROTO_ICMP;
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("ICMP discrimination rule set failed");
+			return -1;
+		}
+
+		(*device_configured) |= local_cfg;
+
+		return 0;
 	}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+	if (mask->hdr.icmp_type) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_ICMP,
+					NH_FLD_ICMP_TYPE,
+					NH_FLD_ICMP_TYPE_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_ICMP,
+					NH_FLD_ICMP_TYPE,
+					NH_FLD_ICMP_TYPE_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before ICMP TYPE set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_ICMP,
+				NH_FLD_ICMP_TYPE,
+				&spec->hdr.icmp_type,
+				&mask->hdr.icmp_type,
+				NH_FLD_ICMP_TYPE_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_ICMP,
+				NH_FLD_ICMP_TYPE,
+				&spec->hdr.icmp_type,
+				&mask->hdr.icmp_type,
+				NH_FLD_ICMP_TYPE_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_icmp *)pattern->spec;
-	last	= (const struct rte_flow_item_icmp *)pattern->last;
-	mask	= (const struct rte_flow_item_icmp *)
-			(pattern->mask ? pattern->mask : default_mask);
+	if (mask->hdr.icmp_code) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_ICMP,
+					NH_FLD_ICMP_CODE,
+					NH_FLD_ICMP_CODE_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
-							sizeof(uint8_t));
-	key_iova += sizeof(uint8_t);
-	memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
-							sizeof(uint8_t));
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_ICMP, NH_FLD_ICMP_CODE);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_ICMP,
+					NH_FLD_ICMP_CODE,
+					NH_FLD_ICMP_CODE_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
-							sizeof(uint8_t));
-	key_iova += sizeof(uint8_t);
-	memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
-							sizeof(uint8_t));
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
 
-	flow->key_size += 2 * sizeof(uint8_t);
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before ICMP CODE set failed");
+			return -1;
+		}
 
-	return device_configured;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_ICMP,
+				NH_FLD_ICMP_CODE,
+				&spec->hdr.icmp_code,
+				&mask->hdr.icmp_code,
+				NH_FLD_ICMP_CODE_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_ICMP,
+				NH_FLD_ICMP_CODE,
+				&spec->hdr.icmp_code,
+				&mask->hdr.icmp_code,
+				NH_FLD_ICMP_CODE_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
+			return -1;
+		}
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
 }
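
For reference, a rule exercising both the icmp_type and icmp_code paths above could be created from testpmd with something like the following (port and queue numbers are arbitrary):

	flow create 0 ingress pattern eth / ipv4 / icmp type is 8 code is 0 / end actions queue index 1 / end

A bare "pattern icmp / end", as in the comment in the !spec branch, instead matches only on the IP next-protocol field via the discrimination rule.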
 
 static int
@@ -745,12 +1576,11 @@ dpaa2_configure_flow_udp(struct rte_flow *flow,
 			  const struct rte_flow_attr *attr,
 			  const struct rte_flow_item *pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
+			  struct rte_flow_error *error __rte_unused,
+			  int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_udp *spec, *mask;
 
@@ -759,115 +1589,217 @@ dpaa2_configure_flow_udp(struct rte_flow *flow,
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_udp *)pattern->spec;
+	last    = (const struct rte_flow_item_udp *)pattern->last;
+	mask    = (const struct rte_flow_item_udp *)
+		(pattern->mask ? pattern->mask : default_mask);
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec || !mc_l4_port_identification) {
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"QoS Extract IP protocol to discriminate UDP failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			 entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+				&priv->extract.tc_key_extract[group],
+				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"FS Extract IP protocol to discriminate UDP failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move IP addr before UDP discrimination set failed");
+			return -1;
+		}
+
+		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+		proto.ip_proto = IPPROTO_UDP;
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("UDP discrimination rule set failed");
+			return -1;
+		}
+
+		(*device_configured) |= local_cfg;
+
+		if (!spec)
+			return 0;
 	}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
+	if (mask->hdr.src_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_UDP,
+					NH_FLD_UDP_PORT_SRC,
+					NH_FLD_UDP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
 
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
-		index++;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
 
-		priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
-		index++;
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_UDP,
+					NH_FLD_UDP_PORT_SRC,
+					NH_FLD_UDP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
 
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
 
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
-		index++;
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before UDP_PORT_SRC set failed");
+			return -1;
+		}
 
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
-		index++;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_UDP,
+				NH_FLD_UDP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_UDP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_UDP_PORT_SRC rule data set failed");
+			return -1;
+		}
 
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_UDP,
+				NH_FLD_UDP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_UDP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_UDP_PORT_SRC rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_udp *)pattern->spec;
-	last	= (const struct rte_flow_item_udp *)pattern->last;
-	mask	= (const struct rte_flow_item_udp *)
-			(pattern->mask ? pattern->mask : default_mask);
+	if (mask->hdr.dst_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_UDP,
+					NH_FLD_UDP_PORT_DST,
+					NH_FLD_UDP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
-							sizeof(uint16_t));
-	key_iova +=  sizeof(uint16_t);
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
-							sizeof(uint16_t));
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
-							sizeof(uint16_t));
-	mask_iova +=  sizeof(uint16_t);
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
-							sizeof(uint16_t));
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_UDP,
+					NH_FLD_UDP_PORT_DST,
+					NH_FLD_UDP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before UDP_PORT_DST set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_UDP,
+				NH_FLD_UDP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_UDP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_UDP_PORT_DST rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_UDP,
+				NH_FLD_UDP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_UDP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_UDP_PORT_DST rule data set failed");
+			return -1;
+		}
+	}
 
-	flow->key_size += (2 * sizeof(uint16_t));
+	(*device_configured) |= local_cfg;
 
-	return device_configured;
+	return 0;
 }
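
Note the "!spec || !mc_l4_port_identification" guard above: when the MC firmware cannot identify L4 ports on its own, an IP next-protocol == IPPROTO_UDP discrimination entry is installed first, and the function then continues to the port extracts whenever a spec is present. A rule hitting both the source and destination port paths could be created from testpmd with something like (values are arbitrary):

	flow create 0 ingress pattern eth / ipv4 / udp src is 53 dst is 1024 / end actions queue index 2 / end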
 
 static int
@@ -876,130 +1808,231 @@ dpaa2_configure_flow_tcp(struct rte_flow *flow,
 			 const struct rte_flow_attr *attr,
 			 const struct rte_flow_item *pattern,
 			 const struct rte_flow_action actions[] __rte_unused,
-			 struct rte_flow_error *error __rte_unused)
+			 struct rte_flow_error *error __rte_unused,
+			 int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_tcp *spec, *mask;
 
-	const struct rte_flow_item_tcp *last __rte_unused;
-	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item_tcp *last __rte_unused;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_tcp *)pattern->spec;
+	last    = (const struct rte_flow_item_tcp *)pattern->last;
+	mask    = (const struct rte_flow_item_tcp *)
+		(pattern->mask ? pattern->mask : default_mask);
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec || !mc_l4_port_identification) {
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"QoS Extract IP protocol to discriminate TCP failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+				&priv->extract.tc_key_extract[group],
+				DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"FS Extract IP protocol to discriminate TCP failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move IP addr before TCP discrimination set failed");
+			return -1;
+		}
+
+		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+		proto.ip_proto = IPPROTO_TCP;
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("TCP discrimination rule set failed");
+			return -1;
+		}
 
-	group = attr->group;
+		(*device_configured) |= local_cfg;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
+		if (!spec)
+			return 0;
 	}
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	if (mask->hdr.src_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_TCP,
+					NH_FLD_TCP_PORT_SRC,
+					NH_FLD_TCP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_TCP,
+					NH_FLD_TCP_PORT_SRC,
+					NH_FLD_TCP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
-	}
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before TCP_PORT_SRC set failed");
+			return -1;
+		}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_TCP,
+				NH_FLD_TCP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_TCP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_TCP_PORT_SRC rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_TCP,
+				NH_FLD_TCP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_TCP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_TCP_PORT_SRC rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_tcp *)pattern->spec;
-	last	= (const struct rte_flow_item_tcp *)pattern->last;
-	mask	= (const struct rte_flow_item_tcp *)
-			(pattern->mask ? pattern->mask : default_mask);
+	if (mask->hdr.dst_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_TCP,
+					NH_FLD_TCP_PORT_DST,
+					NH_FLD_TCP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_TCP,
+					NH_FLD_TCP_PORT_DST,
+					NH_FLD_TCP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
-							sizeof(uint16_t));
-	key_iova += sizeof(uint16_t);
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
-							sizeof(uint16_t));
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before TCP_PORT_DST set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_TCP,
+				NH_FLD_TCP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_TCP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_TCP_PORT_DST rule data set failed");
+			return -1;
+		}
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
-							sizeof(uint16_t));
-	mask_iova += sizeof(uint16_t);
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
-							sizeof(uint16_t));
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_TCP,
+				NH_FLD_TCP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_TCP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_TCP_PORT_DST rule data set failed");
+			return -1;
+		}
+	}
 
-	flow->key_size += 2 * sizeof(uint16_t);
+	(*device_configured) |= local_cfg;
 
-	return device_configured;
+	return 0;
 }
 
 static int
@@ -1008,12 +2041,11 @@ dpaa2_configure_flow_sctp(struct rte_flow *flow,
 			  const struct rte_flow_attr *attr,
 			  const struct rte_flow_item *pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
-			  struct rte_flow_error *error __rte_unused)
+			  struct rte_flow_error *error __rte_unused,
+			  int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_sctp *spec, *mask;
 
@@ -1022,116 +2054,218 @@ dpaa2_configure_flow_sctp(struct rte_flow *flow,
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_sctp *)pattern->spec;
+	last    = (const struct rte_flow_item_sctp *)pattern->last;
+	mask    = (const struct rte_flow_item_sctp *)
+			(pattern->mask ? pattern->mask : default_mask);
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec || !mc_l4_port_identification) {
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"QoS Extract IP protocol to discriminate SCTP failed.");
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
-	}
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.tc_key_extract[group],
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"FS Extract IP protocol to discriminate SCTP failed.");
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before SCTP discrimination set failed");
+			return -1;
+		}
+
+		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+		proto.ip_proto = IPPROTO_SCTP;
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("SCTP discrimination rule set failed");
+			return -1;
+		}
+
+		(*device_configured) |= local_cfg;
+
+		if (!spec)
+			return 0;
 	}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
-
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
-		index++;
-
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
-		index++;
-
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
-
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
-		index++;
-
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
-		index++;
-
-		priv->extract.fs_key_cfg[group].num_extracts = index;
+	if (mask->hdr.src_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_SCTP,
+					NH_FLD_SCTP_PORT_SRC,
+					NH_FLD_SCTP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_SCTP,
+					NH_FLD_SCTP_PORT_SRC,
+					NH_FLD_SCTP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before SCTP_PORT_SRC set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_SCTP,
+				NH_FLD_SCTP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_SCTP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
+			return -1;
+		}
+
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_SCTP,
+				NH_FLD_SCTP_PORT_SRC,
+				&spec->hdr.src_port,
+				&mask->hdr.src_port,
+				NH_FLD_SCTP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_SCTP_PORT_SRC rule data set failed");
+			return -1;
+		}
 	}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_sctp *)pattern->spec;
-	last	= (const struct rte_flow_item_sctp *)pattern->last;
-	mask	= (const struct rte_flow_item_sctp *)
-			(pattern->mask ? pattern->mask : default_mask);
+	if (mask->hdr.dst_port) {
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.qos_key_extract,
+					NET_PROT_SCTP,
+					NH_FLD_SCTP_PORT_DST,
+					NH_FLD_SCTP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
+		if (index < 0) {
+			ret = dpaa2_flow_extract_add(
+					&priv->extract.tc_key_extract[group],
+					NET_PROT_SCTP,
+					NH_FLD_SCTP_PORT_DST,
+					NH_FLD_SCTP_PORT_SIZE);
+			if (ret) {
+				DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
+
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move ipaddr before SCTP_PORT_DST set failed");
+			return -1;
+		}
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
-							sizeof(uint16_t));
-	key_iova += sizeof(uint16_t);
-	memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
-							sizeof(uint16_t));
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_SCTP,
+				NH_FLD_SCTP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_SCTP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"QoS NH_FLD_SCTP_PORT_DST rule data set failed");
+			return -1;
+		}
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
-							sizeof(uint16_t));
-	mask_iova += sizeof(uint16_t);
-	memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
-							sizeof(uint16_t));
+		ret = dpaa2_flow_rule_data_set(
+				&priv->extract.tc_key_extract[group],
+				&flow->fs_rule,
+				NET_PROT_SCTP,
+				NH_FLD_SCTP_PORT_DST,
+				&spec->hdr.dst_port,
+				&mask->hdr.dst_port,
+				NH_FLD_SCTP_PORT_SIZE);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"FS NH_FLD_SCTP_PORT_DST rule data set failed");
+			return -1;
+		}
+	}
 
-	flow->key_size += 2 * sizeof(uint16_t);
+	(*device_configured) |= local_cfg;
 
-	return device_configured;
+	return 0;
 }
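
The UDP, TCP and SCTP handlers above are structurally identical and differ only in their protocol constants. A hypothetical descriptor table sketching how the three could be factored (all names below are illustrative, not part of the patch):

	struct dpaa2_l4_proto_desc {
		enum net_prot prot;	/* NET_PROT_UDP/TCP/SCTP */
		uint32_t fld_src;	/* NH_FLD_*_PORT_SRC */
		uint32_t fld_dst;	/* NH_FLD_*_PORT_DST */
		uint8_t fld_size;	/* NH_FLD_*_PORT_SIZE */
		uint8_t ip_proto;	/* IPPROTO_* for discrimination */
	};

	static const struct dpaa2_l4_proto_desc dpaa2_l4_descs[] = {
		{NET_PROT_UDP, NH_FLD_UDP_PORT_SRC, NH_FLD_UDP_PORT_DST,
			NH_FLD_UDP_PORT_SIZE, IPPROTO_UDP},
		{NET_PROT_TCP, NH_FLD_TCP_PORT_SRC, NH_FLD_TCP_PORT_DST,
			NH_FLD_TCP_PORT_SIZE, IPPROTO_TCP},
		{NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC, NH_FLD_SCTP_PORT_DST,
			NH_FLD_SCTP_PORT_SIZE, IPPROTO_SCTP},
	};

A single table-driven dpaa2_configure_flow_l4() built on such descriptors would keep the per-protocol error strings while removing most of the duplicated extract and rule-data code.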
 
 static int
@@ -1140,12 +2274,11 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
 			 const struct rte_flow_attr *attr,
 			 const struct rte_flow_item *pattern,
 			 const struct rte_flow_action actions[] __rte_unused,
-			 struct rte_flow_error *error __rte_unused)
+			 struct rte_flow_error *error __rte_unused,
+			 int *device_configured)
 {
-	int index, j = 0;
-	size_t key_iova;
-	size_t mask_iova;
-	int device_configured = 0, entry_found = 0;
+	int index, ret;
+	int local_cfg = 0;
 	uint32_t group;
 	const struct rte_flow_item_gre *spec, *mask;
 
@@ -1154,96 +2287,413 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
 
 	group = attr->group;
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
-	if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
-	}
+	/* Parse pattern list to get the matching parameters */
+	spec    = (const struct rte_flow_item_gre *)pattern->spec;
+	last    = (const struct rte_flow_item_gre *)pattern->last;
+	mask    = (const struct rte_flow_item_gre *)
+		(pattern->mask ? pattern->mask : default_mask);
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		struct proto_discrimination proto;
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.qos_key_extract.dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.qos_key_extract,
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"QoS Extract IP protocol to discriminate GRE failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+		}
+
+		index = dpaa2_flow_extract_search(
+				&priv->extract.tc_key_extract[group].dpkg,
+				NET_PROT_IP, NH_FLD_IP_PROTO);
+		if (index < 0) {
+			ret = dpaa2_flow_proto_discrimination_extract(
+					&priv->extract.tc_key_extract[group],
+					DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+			if (ret) {
+				DPAA2_PMD_ERR(
+					"FS Extract IP protocol to discriminate GRE failed.");
+
+				return -1;
+			}
+			local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+		}
 
-	if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
-		DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
-						DPKG_MAX_NUM_OF_EXTRACTS);
-		return -ENOTSUP;
+		ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Move IP addr before GRE discrimination set failed");
+			return -1;
+		}
+
+		proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+		proto.ip_proto = IPPROTO_GRE;
+		ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+							proto, group);
+		if (ret) {
+			DPAA2_PMD_ERR("GRE discrimination rule set failed");
+			return -1;
+		}
+
+		(*device_configured) |= local_cfg;
+
+		return 0;
 	}
 
-	for (j = 0; j < priv->pattern[8].item_count; j++) {
-		if (priv->pattern[8].pattern_type[j] != pattern->type) {
-			continue;
-		} else {
-			entry_found = 1;
-			break;
+	if (!mask->protocol)
+		return 0;
+
+	index = dpaa2_flow_extract_search(
+			&priv->extract.qos_key_extract.dpkg,
+			NET_PROT_GRE, NH_FLD_GRE_TYPE);
+	if (index < 0) {
+		ret = dpaa2_flow_extract_add(
+				&priv->extract.qos_key_extract,
+				NET_PROT_GRE,
+				NH_FLD_GRE_TYPE,
+				sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
+
+			return -1;
+		}
+		local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+	}
+
+	index = dpaa2_flow_extract_search(
+			&priv->extract.tc_key_extract[group].dpkg,
+			NET_PROT_GRE, NH_FLD_GRE_TYPE);
+	if (index < 0) {
+		ret = dpaa2_flow_extract_add(
+				&priv->extract.tc_key_extract[group],
+				NET_PROT_GRE,
+				NH_FLD_GRE_TYPE,
+				sizeof(rte_be16_t));
+		if (ret) {
+			DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
+
+			return -1;
 		}
+		local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
 	}
 
-	if (!entry_found) {
-		priv->pattern[8].pattern_type[j] = pattern->type;
-		priv->pattern[8].item_count++;
-		device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
+	ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+	if (ret) {
+		DPAA2_PMD_ERR(
+			"Move ipaddr before GRE_TYPE set failed");
+		return -1;
+	}
+
+	ret = dpaa2_flow_rule_data_set(
+				&priv->extract.qos_key_extract,
+				&flow->qos_rule,
+				NET_PROT_GRE,
+				NH_FLD_GRE_TYPE,
+				&spec->protocol,
+				&mask->protocol,
+				sizeof(rte_be16_t));
+	if (ret) {
+		DPAA2_PMD_ERR(
+			"QoS NH_FLD_GRE_TYPE rule data set failed");
+		return -1;
+	}
+
+	ret = dpaa2_flow_rule_data_set(
+			&priv->extract.tc_key_extract[group],
+			&flow->fs_rule,
+			NET_PROT_GRE,
+			NH_FLD_GRE_TYPE,
+			&spec->protocol,
+			&mask->protocol,
+			sizeof(rte_be16_t));
+	if (ret) {
+		DPAA2_PMD_ERR(
+			"FS NH_FLD_GRE_TYPE rule data set failed");
+		return -1;
 	}
 
-	entry_found = 0;
-	for (j = 0; j < priv->pattern[group].item_count; j++) {
-		if (priv->pattern[group].pattern_type[j] != pattern->type) {
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
+/* Existing QoS/FS entries that contain IP address(es)
+ * need to be updated after new extract(s) are inserted
+ * ahead of the IP address extract(s).
+ */
+static int
+dpaa2_flow_entry_update(
+	struct dpaa2_dev_priv *priv, uint8_t tc_id)
+{
+	struct rte_flow *curr = LIST_FIRST(&priv->flows);
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	int ret;
+	int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
+	int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
+	struct dpaa2_key_extract *qos_key_extract =
+		&priv->extract.qos_key_extract;
+	struct dpaa2_key_extract *tc_key_extract =
+		&priv->extract.tc_key_extract[tc_id];
+	char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
+	char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
+	char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
+	char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
+	int extend = -1, extend1, size;
+
+	while (curr) {
+		if (curr->ipaddr_rule.ipaddr_type ==
+			FLOW_NONE_IPADDR) {
+			curr = LIST_NEXT(curr, next);
 			continue;
+		}
+
+		if (curr->ipaddr_rule.ipaddr_type ==
+			FLOW_IPV4_ADDR) {
+			qos_ipsrc_offset =
+				qos_key_extract->key_info.ipv4_src_offset;
+			qos_ipdst_offset =
+				qos_key_extract->key_info.ipv4_dst_offset;
+			fs_ipsrc_offset =
+				tc_key_extract->key_info.ipv4_src_offset;
+			fs_ipdst_offset =
+				tc_key_extract->key_info.ipv4_dst_offset;
+			size = NH_FLD_IPV4_ADDR_SIZE;
 		} else {
-			entry_found = 1;
-			break;
+			qos_ipsrc_offset =
+				qos_key_extract->key_info.ipv6_src_offset;
+			qos_ipdst_offset =
+				qos_key_extract->key_info.ipv6_dst_offset;
+			fs_ipsrc_offset =
+				tc_key_extract->key_info.ipv6_src_offset;
+			fs_ipdst_offset =
+				tc_key_extract->key_info.ipv6_dst_offset;
+			size = NH_FLD_IPV6_ADDR_SIZE;
 		}
-	}
 
-	if (!entry_found) {
-		priv->pattern[group].pattern_type[j] = pattern->type;
-		priv->pattern[group].item_count++;
-		device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
-	}
+		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
+				priv->token, &curr->qos_rule);
+		if (ret) {
+			DPAA2_PMD_ERR("Qos entry remove failed.");
+			return -1;
+		}
 
-	/* Get traffic class index and flow id to be configured */
-	flow->tc_id = group;
-	flow->index = attr->priority;
+		extend = -1;
+
+		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
+			RTE_ASSERT(qos_ipsrc_offset >=
+				curr->ipaddr_rule.qos_ipsrc_offset);
+			extend1 = qos_ipsrc_offset -
+				curr->ipaddr_rule.qos_ipsrc_offset;
+			if (extend >= 0)
+				RTE_ASSERT(extend == extend1);
+			else
+				extend = extend1;
+
+			memcpy(ipsrc_key,
+				(char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				size);
+			memset((char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				0, size);
+
+			memcpy(ipsrc_mask,
+				(char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				size);
+			memset((char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				0, size);
+
+			curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
+		}
 
-	if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-		index = priv->extract.qos_key_cfg.num_extracts;
-		priv->extract.qos_key_cfg.extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
-		priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
-		index++;
+		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
+			RTE_ASSERT(qos_ipdst_offset >=
+				curr->ipaddr_rule.qos_ipdst_offset);
+			extend1 = qos_ipdst_offset -
+				curr->ipaddr_rule.qos_ipdst_offset;
+			if (extend >= 0)
+				RTE_ASSERT(extend == extend1);
+			else
+				extend = extend1;
+
+			memcpy(ipdst_key,
+				(char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				size);
+			memset((char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				0, size);
+
+			memcpy(ipdst_mask,
+				(char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				size);
+			memset((char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				0, size);
+
+			curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
+		}
 
-		priv->extract.qos_key_cfg.num_extracts = index;
-	}
+		if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
+			memcpy((char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				ipsrc_key,
+				size);
+			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipsrc_offset,
+				ipsrc_mask,
+				size);
+		}
+		if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
+			memcpy((char *)(size_t)curr->qos_rule.key_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				ipdst_key,
+				size);
+			memcpy((char *)(size_t)curr->qos_rule.mask_iova +
+				curr->ipaddr_rule.qos_ipdst_offset,
+				ipdst_mask,
+				size);
+		}
 
-	if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-		index = priv->extract.fs_key_cfg[group].num_extracts;
-		priv->extract.fs_key_cfg[group].extracts[index].type =
-							DPKG_EXTRACT_FROM_HDR;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
-		priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
-		index++;
+		if (extend >= 0)
+			curr->qos_rule.key_size += extend;
 
-		priv->extract.fs_key_cfg[group].num_extracts = index;
-	}
+		ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+				priv->token, &curr->qos_rule,
+				curr->tc_id, curr->qos_index,
+				0, 0);
+		if (ret) {
+			DPAA2_PMD_ERR("Qos entry update failed.");
+			return -1;
+		}
 
-	/* Parse pattern list to get the matching parameters */
-	spec	= (const struct rte_flow_item_gre *)pattern->spec;
-	last	= (const struct rte_flow_item_gre *)pattern->last;
-	mask	= (const struct rte_flow_item_gre *)
-			(pattern->mask ? pattern->mask : default_mask);
+		if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
+			curr = LIST_NEXT(curr, next);
+			continue;
+		}
+
+		extend = -1;
+
+		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
+				priv->token, curr->tc_id, &curr->fs_rule);
+		if (ret) {
+			DPAA2_PMD_ERR("FS entry remove failed.");
+			return -1;
+		}
+
+		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
+			tc_id == curr->tc_id) {
+			RTE_ASSERT(fs_ipsrc_offset >=
+				curr->ipaddr_rule.fs_ipsrc_offset);
+			extend1 = fs_ipsrc_offset -
+				curr->ipaddr_rule.fs_ipsrc_offset;
+			if (extend >= 0)
+				RTE_ASSERT(extend == extend1);
+			else
+				extend = extend1;
+
+			memcpy(ipsrc_key,
+				(char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				size);
+			memset((char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				0, size);
+
+			memcpy(ipsrc_mask,
+				(char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				size);
+			memset((char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				0, size);
+
+			curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
+		}
+
+		if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
+			tc_id == curr->tc_id) {
+			RTE_ASSERT(fs_ipdst_offset >=
+				curr->ipaddr_rule.fs_ipdst_offset);
+			extend1 = fs_ipdst_offset -
+				curr->ipaddr_rule.fs_ipdst_offset;
+			if (extend >= 0)
+				RTE_ASSERT(extend == extend1);
+			else
+				extend = extend1;
+
+			memcpy(ipdst_key,
+				(char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				size);
+			memset((char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				0, size);
+
+			memcpy(ipdst_mask,
+				(char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				size);
+			memset((char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				0, size);
+
+			curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
+		}
+
+		if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
+			memcpy((char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				ipsrc_key,
+				size);
+			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipsrc_offset,
+				ipsrc_mask,
+				size);
+		}
+		if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
+			memcpy((char *)(size_t)curr->fs_rule.key_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				ipdst_key,
+				size);
+			memcpy((char *)(size_t)curr->fs_rule.mask_iova +
+				curr->ipaddr_rule.fs_ipdst_offset,
+				ipdst_mask,
+				size);
+		}
 
-	key_iova = flow->rule.key_iova + flow->key_size;
-	memcpy((void *)key_iova, (const void *)(&spec->protocol),
-							sizeof(rte_be16_t));
+		if (extend >= 0)
+			curr->fs_rule.key_size += extend;
 
-	mask_iova = flow->rule.mask_iova + flow->key_size;
-	memcpy((void *)mask_iova, (const void *)(&mask->protocol),
-							sizeof(rte_be16_t));
+		ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
+				priv->token, curr->tc_id, curr->fs_index,
+				&curr->fs_rule, &curr->action_cfg);
+		if (ret) {
+			DPAA2_PMD_ERR("FS entry update failed.");
+			return -1;
+		}
 
-	flow->key_size += sizeof(rte_be16_t);
+		curr = LIST_NEXT(curr, next);
+	}
 
-	return device_configured;
+	return 0;
 }
 
 static int
@@ -1262,7 +2712,6 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 	struct dpni_attr nic_attr;
 	struct dpni_rx_tc_dist_cfg tc_cfg;
 	struct dpni_qos_tbl_cfg qos_cfg;
-	struct dpkg_profile_cfg key_cfg;
 	struct dpni_fs_action_cfg action;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
@@ -1273,75 +2722,77 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 	while (!end_of_list) {
 		switch (pattern[i].type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			is_keycfg_configured = dpaa2_configure_flow_eth(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_eth(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("ETH flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_vlan(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("VLAN flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
-			is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
-			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_generic_ip(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("IP flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_ICMP:
-			is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_icmp(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("ICMP flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			is_keycfg_configured = dpaa2_configure_flow_udp(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_udp(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("UDP flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_tcp(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("TCP flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
-			is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
-									dev, attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_sctp(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("SCTP flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			is_keycfg_configured = dpaa2_configure_flow_gre(flow,
-									dev,
-									attr,
-									&pattern[i],
-									actions,
-									error);
+			ret = dpaa2_configure_flow_gre(flow,
+					dev, attr, &pattern[i], actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("GRE flow configuration failed!");
+				return ret;
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_END:
 			end_of_list = 1;
@@ -1365,8 +2816,8 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 			memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
 			action.flow_id = flow->flow_id;
 			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
-				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
-							 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
+				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
+					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
 					DPAA2_PMD_ERR(
 					"Unable to prepare extract parameters");
 					return -1;
@@ -1377,7 +2828,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 				qos_cfg.keep_entries = true;
 				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
-							 priv->token, &qos_cfg);
+						priv->token, &qos_cfg);
 				if (ret < 0) {
 					DPAA2_PMD_ERR(
 					"Distribution cannot be configured.(%d)"
@@ -1386,8 +2837,10 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 				}
 			}
 			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-				if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
-						(uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
+				if (dpkg_prepare_key_cfg(
+				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
+				(uint8_t *)(size_t)priv->extract
+				.tc_extract_param[flow->tc_id]) < 0) {
 					DPAA2_PMD_ERR(
 					"Unable to prepare extract parameters");
 					return -1;
@@ -1397,7 +2850,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 				tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
 				tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
 				tc_cfg.key_cfg_iova =
-					(uint64_t)priv->extract.fs_extract_param[flow->tc_id];
+					(uint64_t)priv->extract.tc_extract_param[flow->tc_id];
 				tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
 				tc_cfg.fs_cfg.keep_entries = true;
 				ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
@@ -1422,27 +2875,114 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 			}
 
 			action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
-			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
-			flow->rule.key_size = flow->key_size;
+
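+			/* Find the first free slot in the QoS table;
+			 * priv->qos_index is a byte-per-entry usage map
+			 * allocated on first use. */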
+			if (!priv->qos_index) {
+				priv->qos_index = rte_zmalloc(0,
+						nic_attr.qos_entries, 64);
+			}
+			for (index = 0; index < nic_attr.qos_entries; index++) {
+				if (!priv->qos_index[index]) {
+					priv->qos_index[index] = 1;
+					break;
+				}
+			}
+			if (index >= nic_attr.qos_entries) {
+				DPAA2_PMD_ERR("QoS table with %d entries full",
+					nic_attr.qos_entries);
+				return -1;
+			}
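+			/* IP address extracts sit at the tail of the key, so
+			 * the effective key size ends at the farther of the
+			 * two address fields. */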
+			flow->qos_rule.key_size = priv->extract
+				.qos_key_extract.key_info.key_total_size;
+			if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
+				if (flow->ipaddr_rule.qos_ipdst_offset >=
+					flow->ipaddr_rule.qos_ipsrc_offset) {
+					flow->qos_rule.key_size =
+						flow->ipaddr_rule.qos_ipdst_offset +
+						NH_FLD_IPV4_ADDR_SIZE;
+				} else {
+					flow->qos_rule.key_size =
+						flow->ipaddr_rule.qos_ipsrc_offset +
+						NH_FLD_IPV4_ADDR_SIZE;
+				}
+			} else if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV6_ADDR) {
+				if (flow->ipaddr_rule.qos_ipdst_offset >=
+					flow->ipaddr_rule.qos_ipsrc_offset) {
+					flow->qos_rule.key_size =
+						flow->ipaddr_rule.qos_ipdst_offset +
+						NH_FLD_IPV6_ADDR_SIZE;
+				} else {
+					flow->qos_rule.key_size =
+						flow->ipaddr_rule.qos_ipsrc_offset +
+						NH_FLD_IPV6_ADDR_SIZE;
+				}
+			}
 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
-						priv->token, &flow->rule,
+						priv->token, &flow->qos_rule,
 						flow->tc_id, index,
 						0, 0);
 			if (ret < 0) {
 				DPAA2_PMD_ERR(
 				"Error in addnig entry to QoS table(%d)", ret);
+				priv->qos_index[index] = 0;
 				return ret;
 			}
+			flow->qos_index = index;
 
 			/* Then Configure FS table */
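+			/* Same first-free-slot allocation for the FS table. */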
+			if (!priv->fs_index) {
+				priv->fs_index = rte_zmalloc(0,
+								nic_attr.fs_entries, 64);
+			}
+			for (index = 0; index < nic_attr.fs_entries; index++) {
+				if (!priv->fs_index[index]) {
+					priv->fs_index[index] = 1;
+					break;
+				}
+			}
+			if (index >= nic_attr.fs_entries) {
+				DPAA2_PMD_ERR("FS table with %d entries full",
+					nic_attr.fs_entries);
+				return -1;
+			}
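+			/* As with the QoS key, a trailing IP address extract
+			 * determines the effective FS key size. */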
+			flow->fs_rule.key_size = priv->extract
+					.tc_key_extract[attr->group].key_info.key_total_size;
+			if (flow->ipaddr_rule.ipaddr_type ==
+				FLOW_IPV4_ADDR) {
+				if (flow->ipaddr_rule.fs_ipdst_offset >=
+					flow->ipaddr_rule.fs_ipsrc_offset) {
+					flow->fs_rule.key_size =
+						flow->ipaddr_rule.fs_ipdst_offset +
+						NH_FLD_IPV4_ADDR_SIZE;
+				} else {
+					flow->fs_rule.key_size =
+						flow->ipaddr_rule.fs_ipsrc_offset +
+						NH_FLD_IPV4_ADDR_SIZE;
+				}
+			} else if (flow->ipaddr_rule.ipaddr_type ==
+				FLOW_IPV6_ADDR) {
+				if (flow->ipaddr_rule.fs_ipdst_offset >=
+					flow->ipaddr_rule.fs_ipsrc_offset) {
+					flow->fs_rule.key_size =
+						flow->ipaddr_rule.fs_ipdst_offset +
+						NH_FLD_IPV6_ADDR_SIZE;
+				} else {
+					flow->fs_rule.key_size =
+						flow->ipaddr_rule.fs_ipsrc_offset +
+						NH_FLD_IPV6_ADDR_SIZE;
+				}
+			}
 			ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
-						flow->tc_id, flow->index,
-						&flow->rule, &action);
+						flow->tc_id, index,
+						&flow->fs_rule, &action);
 			if (ret < 0) {
 				DPAA2_PMD_ERR(
 				"Error in adding entry to FS table(%d)", ret);
+				priv->fs_index[index] = 0;
 				return ret;
 			}
+			flow->fs_index = index;
+			memcpy(&flow->action_cfg, &action,
+				sizeof(struct dpni_fs_action_cfg));
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
@@ -1465,7 +3005,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 
 			flow->action = RTE_FLOW_ACTION_TYPE_RSS;
 			ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
-								&key_cfg);
+					&priv->extract.tc_key_extract[flow->tc_id].dpkg);
 			if (ret < 0) {
 				DPAA2_PMD_ERR(
 				"unable to set flow distribution.please check queue config\n");
@@ -1479,7 +3019,9 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 				return -1;
 			}
 
-			if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
+			if (dpkg_prepare_key_cfg(
+				&priv->extract.tc_key_extract[flow->tc_id].dpkg,
+				(uint8_t *)param) < 0) {
 				DPAA2_PMD_ERR(
 				"Unable to prepare extract parameters");
 				rte_free((void *)param);
@@ -1503,8 +3045,9 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 			}
 
 			rte_free((void *)param);
-			if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
-				if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
+			if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
+				if (dpkg_prepare_key_cfg(
+					&priv->extract.qos_key_extract.dpkg,
 					(uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
 					DPAA2_PMD_ERR(
 					"Unable to prepare extract parameters");
@@ -1514,29 +3057,47 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 					sizeof(struct dpni_qos_tbl_cfg));
 				qos_cfg.discard_on_miss = true;
 				qos_cfg.keep_entries = true;
-				qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
+				qos_cfg.key_cfg_iova =
+					(size_t)priv->extract.qos_extract_param;
 				ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
 							 priv->token, &qos_cfg);
 				if (ret < 0) {
 					DPAA2_PMD_ERR(
-					"Distribution can not be configured(%d)\n",
+					"Distribution cannot be configured (%d)\n",
 					ret);
 					return -1;
 				}
 			}
 
 			/* Add Rule into QoS table */
-			index = flow->index + (flow->tc_id * nic_attr.fs_entries);
-			flow->rule.key_size = flow->key_size;
+			if (!priv->qos_index) {
+				priv->qos_index = rte_zmalloc(0,
+						nic_attr.qos_entries, 64);
+			}
+			for (index = 0; index < nic_attr.qos_entries; index++) {
+				if (!priv->qos_index[index]) {
+					priv->qos_index[index] = 1;
+					break;
+				}
+			}
+			if (index >= nic_attr.qos_entries) {
+				DPAA2_PMD_ERR("QoS table with %d entries full",
+					nic_attr.qos_entries);
+				return -1;
+			}
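+			/* No IP address relocation in the RSS path; the key
+			 * size is taken directly from the QoS extract info. */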
+			flow->qos_rule.key_size =
+			  priv->extract.qos_key_extract.key_info.key_total_size;
 			ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
-						&flow->rule, flow->tc_id,
+						&flow->qos_rule, flow->tc_id,
 						index, 0, 0);
 			if (ret < 0) {
 				DPAA2_PMD_ERR(
 				"Error in entry addition in QoS table(%d)",
 				ret);
+				priv->qos_index[index] = 0;
 				return ret;
 			}
+			flow->qos_index = index;
 			break;
 		case RTE_FLOW_ACTION_TYPE_END:
 			end_of_list = 1;
@@ -1550,6 +3111,12 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
 	}
 
 	if (!ret) {
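+		/* Re-sync previously installed entries: adding this flow may
+		 * have shifted the trailing IP address extract offsets. */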
+		ret = dpaa2_flow_entry_update(priv, flow->tc_id);
+		if (ret) {
+			DPAA2_PMD_ERR("Flow entry update failed.");
+
+			return -1;
+		}
 		/* New rules are inserted. */
 		if (!curr) {
 			LIST_INSERT_HEAD(&priv->flows, flow, next);
@@ -1625,15 +3192,15 @@ dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
 }
 
 static inline int
-dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
-			  const struct rte_flow_item pattern[])
+dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
 {
-	unsigned int i, j, k, is_found = 0;
+	unsigned int i, j, is_found = 0;
 	int ret = 0;
 
 	for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
 		for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
-			if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
+			if (dpaa2_supported_pattern_type[i]
+					== pattern[j].type) {
 				is_found = 1;
 				break;
 			}
@@ -1653,18 +3220,6 @@ dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
 			dpaa2_dev_update_default_mask(&pattern[j]);
 	}
 
-	/* DPAA2 platform has a limitation that extract parameter can not be */
-	/* more	than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
-	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
-		for (j = 0; j < MAX_TCS + 1; j++) {
-				for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
-					if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
-						break;
-				}
-			if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
-				ret = -ENOTSUP;
-		}
-	}
 	return ret;
 }
 
@@ -1687,7 +3242,8 @@ dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
 		}
 	}
 	for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
-		if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
+		if ((actions[j].type
+			!= RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
 			ret = -EINVAL;
 	}
 	return ret;
@@ -1729,7 +3285,7 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev,
 		goto not_valid_params;
 	}
 	/* Verify input pattern list */
-	ret = dpaa2_dev_verify_patterns(priv, pattern);
+	ret = dpaa2_dev_verify_patterns(pattern);
 	if (ret < 0) {
 		DPAA2_PMD_ERR(
 			"Invalid pattern list is given\n");
@@ -1763,28 +3319,54 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
 	size_t key_iova = 0, mask_iova = 0;
 	int ret;
 
-	flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
+	flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
 	if (!flow) {
 		DPAA2_PMD_ERR("Failure to allocate memory for flow");
 		goto mem_failure;
 	}
 	/* Allocate DMA'ble memory to write the rules */
-	key_iova = (size_t)rte_malloc(NULL, 256, 64);
+	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
+	if (!key_iova) {
+		DPAA2_PMD_ERR(
+			"Memory allocation failure for rule configuration\n");
+		goto mem_failure;
+	}
+	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
+	if (!mask_iova) {
+		DPAA2_PMD_ERR(
+			"Memory allocation failure for rule configuration\n");
+		goto mem_failure;
+	}
+
+	flow->qos_rule.key_iova = key_iova;
+	flow->qos_rule.mask_iova = mask_iova;
+
+	/* Allocate DMA'ble memory to write the rules */
+	key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
 	if (!key_iova) {
 		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configuration\n");
+			"Memory allocation failure for rule configuration\n");
 		goto mem_failure;
 	}
-	mask_iova = (size_t)rte_malloc(NULL, 256, 64);
+	mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
 	if (!mask_iova) {
 		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configuration\n");
+			"Memory allocation failure for rule configuration\n");
 		goto mem_failure;
 	}
 
-	flow->rule.key_iova = key_iova;
-	flow->rule.mask_iova = mask_iova;
-	flow->key_size = 0;
+	flow->fs_rule.key_iova = key_iova;
+	flow->fs_rule.mask_iova = mask_iova;
+
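+	/* No IP extract yet; address offsets stay invalid until an IP
+	 * pattern item is parsed. */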
+	flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
+	flow->ipaddr_rule.qos_ipsrc_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	flow->ipaddr_rule.qos_ipdst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	flow->ipaddr_rule.fs_ipsrc_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	flow->ipaddr_rule.fs_ipdst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
 
 	switch (dpaa2_filter_type) {
 	case RTE_ETH_FILTER_GENERIC:
@@ -1832,25 +3414,27 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev,
 	case RTE_FLOW_ACTION_TYPE_QUEUE:
 		/* Remove entry from QoS table first */
 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
-					   &flow->rule);
+					   &flow->qos_rule);
 		if (ret < 0) {
 			DPAA2_PMD_ERR(
 				"Error in adding entry to QoS table(%d)", ret);
 			goto error;
 		}
+		priv->qos_index[flow->qos_index] = 0;
 
 		/* Then remove entry from FS table */
 		ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
-					   flow->tc_id, &flow->rule);
+					   flow->tc_id, &flow->fs_rule);
 		if (ret < 0) {
 			DPAA2_PMD_ERR(
 				"Error in entry addition in FS table(%d)", ret);
 			goto error;
 		}
+		priv->fs_index[flow->fs_index] = 0;
 		break;
 	case RTE_FLOW_ACTION_TYPE_RSS:
 		ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
-					   &flow->rule);
+					   &flow->qos_rule);
 		if (ret < 0) {
 			DPAA2_PMD_ERR(
 			"Error in entry addition in QoS table(%d)", ret);
-- 
2.17.1



Thread overview: 83+ messages
2020-05-27 13:22 [dpdk-dev] [PATCH 00/37] NXP DPAAx enhancements Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 01/37] bus/fslmc: fix getting the FD error Hemant Agrawal
2020-05-27 18:07   ` Akhil Goyal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 02/37] net/dpaa: fix fd offset data type Hemant Agrawal
2020-05-27 18:08   ` Akhil Goyal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 03/37] net/dpaa2: enable timestamp for Rx offload case as well Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 04/37] bus/fslmc: combine thread specific variables Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 05/37] bus/fslmc: rework portal allocation to a per thread basis Hemant Agrawal
2020-07-01  7:23   ` Ferruh Yigit
2020-05-27 13:22 ` [dpdk-dev] [PATCH 06/37] bus/fslmc: support handle portal alloc failure Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 07/37] bus/fslmc: support portal migration Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 08/37] bus/fslmc: rename the cinh read functions used for ls1088 Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 09/37] net/dpaa: enable Tx queue taildrop Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 10/37] net/dpaa: add 2.5G support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 11/37] net/dpaa: update process specific device info Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 12/37] drivers: optimize thread local storage for dpaa Hemant Agrawal
2020-05-27 18:13   ` Akhil Goyal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 13/37] bus/dpaa: enable link state interrupt Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 14/37] bus/dpaa: enable set link status Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 15/37] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-06-30 17:00   ` Ferruh Yigit
2020-07-01  4:18     ` Hemant Agrawal
2020-07-01  7:35       ` Ferruh Yigit
2020-05-27 13:23 ` [dpdk-dev] [PATCH 16/37] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 17/37] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-06-30 17:01   ` Ferruh Yigit
2020-07-01  4:04     ` Hemant Agrawal
2020-07-01  7:37       ` Ferruh Yigit
2020-05-27 13:23 ` [dpdk-dev] [PATCH 18/37] bus/dpaa: add shared MAC support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 19/37] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 20/37] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 21/37] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 22/37] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 23/37] net/dpaa2: dynamic flow control support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 24/37] net/dpaa2: key extracts of flow API Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 25/37] net/dpaa2: sanity check for flow extracts Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 26/37] net/dpaa2: free flow rule memory Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 27/37] net/dpaa2: flow QoS or FS table entry indexing Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 28/37] net/dpaa2: define the size of table entry Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 29/37] net/dpaa2: log of flow extracts and rules Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 30/37] net/dpaa2: discrimination between IPv4 and IPv6 Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 31/37] net/dpaa2: distribution size set on multiple TCs Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 32/37] net/dpaa2: index of queue action for flow Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 33/37] net/dpaa2: flow data sanity check Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 34/37] net/dpaa2: flow API QoS setup follows FS setup Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 35/37] net/dpaa2: flow API FS miss action configuration Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 36/37] net/dpaa2: configure per class distribution size Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 37/37] net/dpaa2: support raw flow classification Hemant Agrawal
2020-06-30 17:01 ` [dpdk-dev] [PATCH 00/37] NXP DPAAx enhancements Ferruh Yigit
2020-07-01  4:08   ` Hemant Agrawal
2020-07-07  9:22 ` [dpdk-dev] [PATCH v2 00/29] " Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 01/29] bus/fslmc: fix getting the FD error Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 02/29] net/dpaa: fix fd offset data type Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 03/29] net/dpaa2: enable timestamp for Rx offload case as well Hemant Agrawal
2020-07-11 13:46     ` Thomas Monjalon
2020-07-13  3:47       ` Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 04/29] bus/fslmc: combine thread specific variables Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 05/29] bus/fslmc: rework portal allocation to a per thread basis Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 06/29] bus/fslmc: support handle portal alloc failure Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 07/29] bus/fslmc: support portal migration Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 08/29] bus/fslmc: rename the cinh read functions used for ls1088 Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 09/29] net/dpaa: enable Tx queue taildrop Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 10/29] net/dpaa: add 2.5G support Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 11/29] net/dpaa: update process specific device info Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 12/29] drivers: optimize thread local storage for dpaa Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 13/29] bus/dpaa: enable link state interrupt Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 14/29] bus/dpaa: enable set link status Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 15/29] net/dpaa2: support dynamic flow control Hemant Agrawal
2020-07-07  9:22   ` Hemant Agrawal [this message]
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 17/29] net/dpaa2: add sanity check for flow extracts Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 18/29] net/dpaa2: free flow rule memory Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 19/29] net/dpaa2: support QoS or FS table entry indexing Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 20/29] net/dpaa2: define the size of table entry Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 21/29] net/dpaa2: add logging of flow extracts and rules Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 22/29] net/dpaa2: support discrimination between IPv4 and IPv6 Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 23/29] net/dpaa2: support distribution size set on multiple TCs Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 24/29] net/dpaa2: support index of queue action for flow Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 25/29] net/dpaa2: add flow data sanity check Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 26/29] net/dpaa2: modify flow API QoS setup to follow FS setup Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 27/29] net/dpaa2: support flow API FS miss action configuration Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 28/29] net/dpaa2: configure per class distribution size Hemant Agrawal
2020-07-07  9:22   ` [dpdk-dev] [PATCH v2 29/29] net/dpaa2: support raw flow classification Hemant Agrawal
2020-07-09  1:54   ` [dpdk-dev] [PATCH v2 00/29] NXP DPAAx enhancements Ferruh Yigit
