DPDK patches and discussions
 help / color / mirror / Atom feed
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v2 29/43] net/dpaa2: protocol inside tunnel distribution
Date: Wed, 18 Sep 2024 13:20:42 +0530	[thread overview]
Message-ID: <20240918075056.1838654-30-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240918075056.1838654-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

Control flow by protocols inside tunnel.
The tunnel flow items applied by the application are ordered from
outer to inner. The inner items start from the tunnel item itself,
such as VXLAN, GRE, etc.

For example:
flow create 0 ingress pattern ipv4 / vxlan / ipv6 / end
	actions pf / queue index 2 / end

So the items following the tunnel item are tagged as "inner".
The inner items are extracted from the parser results, which are set
by the soft parser.
So far only the VXLAN tunnel is supported. Due to the limited soft
parser area, only the Ethernet header and the VLAN header inside the
tunnel can be used for flow distribution. IPv4, IPv6, UDP and TCP
inside the tunnel can be detected via the user-defined FAF set by the
soft parser for flow distribution.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/net/dpaa2/dpaa2_flow.c | 587 +++++++++++++++++++++++++++++----
 1 file changed, 519 insertions(+), 68 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 7bec13d4eb..e4d7117192 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -58,6 +58,11 @@ struct dpaa2_dev_flow {
 	struct dpni_fs_action_cfg fs_action_cfg;
 };
 
+struct rte_dpaa2_flow_item {
+	struct rte_flow_item generic_item;
+	int in_tunnel;
+};
+
 static const
 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
 	RTE_FLOW_ITEM_TYPE_END,
@@ -1939,10 +1944,203 @@ dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
 }
 
 static int
-dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
+dpaa2_configure_flow_tunnel_eth(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
 	const struct rte_flow_item *pattern,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_eth *spec, *mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	mask = pattern->mask ?
+			pattern->mask : &dpaa2_flow_item_eth_mask;
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec)
+		return 0;
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_ETH)) {
+		DPAA2_PMD_WARN("Extract field(s) of ethernet failed");
+
+		return -EINVAL;
+	}
+
+	if (memcmp((const char *)&mask->src,
+		zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		/*SRC[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR0_OFFSET,
+			1, &spec->src.addr_bytes[0],
+			&mask->src.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[1:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR1_OFFSET,
+			2, &spec->src.addr_bytes[1],
+			&mask->src.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[3:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR3_OFFSET,
+			1, &spec->src.addr_bytes[3],
+			&mask->src.addr_bytes[3],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[4:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR4_OFFSET,
+			2, &spec->src.addr_bytes[4],
+			&mask->src.addr_bytes[4],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		/*SRC[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR0_OFFSET,
+			1, &spec->src.addr_bytes[0],
+			&mask->src.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[1:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR1_OFFSET,
+			2, &spec->src.addr_bytes[1],
+			&mask->src.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[3:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR3_OFFSET,
+			1, &spec->src.addr_bytes[3],
+			&mask->src.addr_bytes[3],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*SRC[4:2]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_SADDR4_OFFSET,
+			2, &spec->src.addr_bytes[4],
+			&mask->src.addr_bytes[4],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	if (memcmp((const char *)&mask->dst,
+		zero_cmp, RTE_ETHER_ADDR_LEN)) {
+		/*DST[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR0_OFFSET,
+			1, &spec->dst.addr_bytes[0],
+			&mask->dst.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[1:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR1_OFFSET,
+			1, &spec->dst.addr_bytes[1],
+			&mask->dst.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[2:3]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR2_OFFSET,
+			3, &spec->dst.addr_bytes[2],
+			&mask->dst.addr_bytes[2],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[5:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR5_OFFSET,
+			1, &spec->dst.addr_bytes[5],
+			&mask->dst.addr_bytes[5],
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		/*DST[0:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR0_OFFSET,
+			1, &spec->dst.addr_bytes[0],
+			&mask->dst.addr_bytes[0],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[1:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR1_OFFSET,
+			1, &spec->dst.addr_bytes[1],
+			&mask->dst.addr_bytes[1],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[2:3]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR2_OFFSET,
+			3, &spec->dst.addr_bytes[2],
+			&mask->dst.addr_bytes[2],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+		/*DST[5:1]*/
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_DADDR5_OFFSET,
+			1, &spec->dst.addr_bytes[5],
+			&mask->dst.addr_bytes[5],
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	if (memcmp((const char *)&mask->type,
+		zero_cmp, sizeof(rte_be16_t))) {
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TYPE_OFFSET,
+			sizeof(rte_be16_t), &spec->type, &mask->type,
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TYPE_OFFSET,
+			sizeof(rte_be16_t), &spec->type, &mask->type,
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
+static int
+dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -1952,6 +2150,13 @@ dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
 	const struct rte_flow_item_eth *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+
+	if (dpaa2_pattern->in_tunnel) {
+		return dpaa2_configure_flow_tunnel_eth(flow,
+				dev, attr, pattern, device_configured);
+	}
 
 	group = attr->group;
 
@@ -2045,10 +2250,81 @@ dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
 }
 
 static int
-dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
+dpaa2_configure_flow_tunnel_vlan(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
 	const struct rte_flow_item *pattern,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_vlan *spec, *mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	mask = pattern->mask ?
+		pattern->mask : &dpaa2_flow_item_vlan_mask;
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (!spec) {
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_VLAN_FRAM,
+				DPAA2_FLOW_QOS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+				FAFE_VXLAN_IN_VLAN_FRAM,
+				DPAA2_FLOW_FS_TYPE,
+				group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_VLAN)) {
+		DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
+
+		return -EINVAL;
+	}
+
+	if (!mask->tci)
+		return 0;
+
+	ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TCI_OFFSET,
+			sizeof(rte_be16_t), &spec->tci, &mask->tci,
+			priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+	if (ret)
+		return ret;
+
+	ret = dpaa2_flow_add_pr_extract_rule(flow,
+			DPAA2_VXLAN_IN_TCI_OFFSET,
+			sizeof(rte_be16_t), &spec->tci, &mask->tci,
+			priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+	if (ret)
+		return ret;
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
+static int
+dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2057,6 +2333,13 @@ dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_vlan *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+
+	if (dpaa2_pattern->in_tunnel) {
+		return dpaa2_configure_flow_tunnel_vlan(flow,
+				dev, attr, pattern, device_configured);
+	}
 
 	group = attr->group;
 
@@ -2116,7 +2399,7 @@ dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
 static int
 dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 			  const struct rte_flow_attr *attr,
-			  const struct rte_flow_item *pattern,
+			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
 			  struct rte_flow_error *error __rte_unused,
 			  int *device_configured)
@@ -2127,6 +2410,7 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	const void *key, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	int size;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2135,6 +2419,26 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	mask_ipv4 = pattern->mask ?
 		    pattern->mask : &dpaa2_flow_item_ipv4_mask;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec_ipv4) {
+			DPAA2_PMD_ERR("Tunnel-IPv4 distribution not support");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_IPV4_FRAM,
+						 DPAA2_FLOW_QOS_TYPE, group,
+						 &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_IPV4_FRAM,
+						 DPAA2_FLOW_FS_TYPE, group,
+						 &local_cfg);
+		return ret;
+	}
+
 	/* Get traffic class index and flow id to be configured */
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
@@ -2233,7 +2537,7 @@ dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 static int
 dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 			  const struct rte_flow_attr *attr,
-			  const struct rte_flow_item *pattern,
+			  const struct rte_dpaa2_flow_item *dpaa2_pattern,
 			  const struct rte_flow_action actions[] __rte_unused,
 			  struct rte_flow_error *error __rte_unused,
 			  int *device_configured)
@@ -2245,6 +2549,7 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
 	const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
 	int size;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2256,6 +2561,26 @@ dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec_ipv6) {
+			DPAA2_PMD_ERR("Tunnel-IPv6 distribution not support");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_IPV6_FRAM,
+						 DPAA2_FLOW_QOS_TYPE, group,
+						 &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_IPV6_FRAM,
+						 DPAA2_FLOW_FS_TYPE, group,
+						 &local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow, FAF_IPV6_FRAM,
 					 DPAA2_FLOW_QOS_TYPE, group,
 					 &local_cfg);
@@ -2352,7 +2677,7 @@ static int
 dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2361,6 +2686,7 @@ dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_icmp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2373,6 +2699,11 @@ dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-ICMP distribution not support");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_ICMP_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2438,7 +2769,7 @@ static int
 dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2447,6 +2778,7 @@ dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_udp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2459,6 +2791,26 @@ dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec) {
+			DPAA2_PMD_ERR("Tunnel-UDP distribution not support");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_UDP_FRAM,
+						 DPAA2_FLOW_QOS_TYPE, group,
+						 &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_UDP_FRAM,
+						 DPAA2_FLOW_FS_TYPE, group,
+						 &local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_UDP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2524,7 +2876,7 @@ static int
 dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2533,6 +2885,7 @@ dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_tcp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2545,6 +2898,26 @@ dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		if (spec) {
+			DPAA2_PMD_ERR("Tunnel-TCP distribution not support");
+			return -ENOTSUP;
+		}
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_TCP_FRAM,
+						 DPAA2_FLOW_QOS_TYPE, group,
+						 &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+						 FAFE_VXLAN_IN_TCP_FRAM,
+						 DPAA2_FLOW_FS_TYPE, group,
+						 &local_cfg);
+		return ret;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_TCP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2610,7 +2983,7 @@ static int
 dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2619,6 +2992,7 @@ dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_sctp *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2631,6 +3005,11 @@ dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-SCTP distribution not support");
+		return -ENOTSUP;
+	}
+
 	ret = dpaa2_flow_identify_by_faf(priv, flow,
 			FAF_SCTP_FRAM, DPAA2_FLOW_QOS_TYPE,
 			group, &local_cfg);
@@ -2696,7 +3075,7 @@ static int
 dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2705,6 +3084,7 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_gre *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2717,6 +3097,11 @@ dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-GRE distribution not support");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_GRE_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2767,7 +3152,7 @@ static int
 dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
@@ -2776,6 +3161,7 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	uint32_t group;
 	const struct rte_flow_item_vxlan *spec, *mask;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
 
 	group = attr->group;
 
@@ -2788,6 +3174,11 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	flow->tc_id = group;
 	flow->tc_index = attr->priority;
 
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-VXLAN distribution not support");
+		return -ENOTSUP;
+	}
+
 	if (!spec) {
 		ret = dpaa2_flow_identify_by_faf(priv, flow,
 				FAF_VXLAN_FRAM, DPAA2_FLOW_QOS_TYPE,
@@ -2851,18 +3242,19 @@ static int
 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
 	const struct rte_flow_attr *attr,
-	const struct rte_flow_item *pattern,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
 	const struct rte_flow_action actions[] __rte_unused,
 	struct rte_flow_error *error __rte_unused,
 	int *device_configured)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	const struct rte_flow_item_raw *spec = pattern->spec;
-	const struct rte_flow_item_raw *mask = pattern->mask;
 	int local_cfg = 0, ret;
 	uint32_t group;
 	struct dpaa2_key_extract *qos_key_extract;
 	struct dpaa2_key_extract *tc_key_extract;
+	const struct rte_flow_item *pattern = &dpaa2_pattern->generic_item;
+	const struct rte_flow_item_raw *spec = pattern->spec;
+	const struct rte_flow_item_raw *mask = pattern->mask;
 
 	/* Need both spec and mask */
 	if (!spec || !mask) {
@@ -3306,6 +3698,45 @@ dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
 	return 0;
 }
 
+static int
+dpaa2_flow_item_convert(const struct rte_flow_item pattern[],
+			struct rte_dpaa2_flow_item **dpaa2_pattern)
+{
+	struct rte_dpaa2_flow_item *new_pattern;
+	int num = 0, tunnel_start = 0;
+
+	while (1) {
+		num++;
+		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_END)
+			break;
+	}
+
+	new_pattern = rte_malloc(NULL, sizeof(struct rte_dpaa2_flow_item) * num,
+				 RTE_CACHE_LINE_SIZE);
+	if (!new_pattern) {
+		DPAA2_PMD_ERR("Failed to alloc %d flow items", num);
+		return -ENOMEM;
+	}
+
+	num = 0;
+	while (pattern[num].type != RTE_FLOW_ITEM_TYPE_END) {
+		memcpy(&new_pattern[num].generic_item, &pattern[num],
+		       sizeof(struct rte_flow_item));
+		new_pattern[num].in_tunnel = 0;
+
+		if (pattern[num].type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tunnel_start = 1;
+		else if (tunnel_start)
+			new_pattern[num].in_tunnel = 1;
+		num++;
+	}
+
+	new_pattern[num].generic_item.type = RTE_FLOW_ITEM_TYPE_END;
+	*dpaa2_pattern = new_pattern;
+
+	return 0;
+}
+
 static int
 dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
@@ -3322,6 +3753,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	uint16_t dist_size, key_size;
 	struct dpaa2_key_extract *qos_key_extract;
 	struct dpaa2_key_extract *tc_key_extract;
+	struct rte_dpaa2_flow_item *dpaa2_pattern = NULL;
 
 	ret = dpaa2_flow_verify_attr(priv, attr);
 	if (ret)
@@ -3331,107 +3763,121 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 	if (ret)
 		return ret;
 
+	ret = dpaa2_flow_item_convert(pattern, &dpaa2_pattern);
+	if (ret)
+		return ret;
+
 	/* Parse pattern list to get the matching parameters */
 	while (!end_of_list) {
 		switch (pattern[i].type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			ret = dpaa2_configure_flow_eth(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_eth(flow, dev, attr,
+						       &dpaa2_pattern[i],
+						       actions, error,
+						       &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("ETH flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			ret = dpaa2_configure_flow_vlan(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_vlan(flow, dev, attr,
+							&dpaa2_pattern[i],
+							actions, error,
+							&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("vLan flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
-			ret = dpaa2_configure_flow_ipv4(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_ipv4(flow, dev, attr,
+							&dpaa2_pattern[i],
+							actions, error,
+							&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("IPV4 flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
-			ret = dpaa2_configure_flow_ipv6(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_ipv6(flow, dev, attr,
+							&dpaa2_pattern[i],
+							actions, error,
+							&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("IPV6 flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_ICMP:
-			ret = dpaa2_configure_flow_icmp(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_icmp(flow, dev, attr,
+							&dpaa2_pattern[i],
+							actions, error,
+							&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("ICMP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			ret = dpaa2_configure_flow_udp(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_udp(flow, dev, attr,
+						       &dpaa2_pattern[i],
+						       actions, error,
+						       &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("UDP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			ret = dpaa2_configure_flow_tcp(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_tcp(flow, dev, attr,
+						       &dpaa2_pattern[i],
+						       actions, error,
+						       &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("TCP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
-			ret = dpaa2_configure_flow_sctp(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_sctp(flow, dev, attr,
+							&dpaa2_pattern[i],
+							actions, error,
+							&is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("SCTP flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			ret = dpaa2_configure_flow_gre(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_gre(flow, dev, attr,
+						       &dpaa2_pattern[i],
+						       actions, error,
+						       &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("GRE flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
-			ret = dpaa2_configure_flow_vxlan(flow,
-					dev, attr, &pattern[i], actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_vxlan(flow, dev, attr,
+							 &dpaa2_pattern[i],
+							 actions, error,
+							 &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("VXLAN flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
-			ret = dpaa2_configure_flow_raw(flow,
-					dev, attr, &pattern[i],
-					actions, error,
-					&is_keycfg_configured);
+			ret = dpaa2_configure_flow_raw(flow, dev, attr,
+						       &dpaa2_pattern[i],
+						       actions, error,
+						       &is_keycfg_configured);
 			if (ret) {
 				DPAA2_PMD_ERR("RAW flow config failed!");
-				return ret;
+				goto end_flow_set;
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_END:
@@ -3463,7 +3909,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			ret = dpaa2_configure_flow_fs_action(priv, flow,
 							     &actions[j]);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			/* Configure FS table first*/
 			dist_size = priv->nb_rx_queues / priv->num_rx_tc;
@@ -3473,20 +3919,20 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 								   dist_size,
 								   false);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			/* Configure QoS table then.*/
 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
 				ret = dpaa2_configure_qos_table(priv, false);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (priv->num_rx_tc > 1) {
 				ret = dpaa2_flow_add_qos_rule(priv, flow);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (flow->tc_index >= priv->fs_entries) {
@@ -3497,7 +3943,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 
 			ret = dpaa2_flow_add_fs_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
@@ -3509,7 +3955,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			if (ret < 0) {
 				DPAA2_PMD_ERR("TC[%d] distset RSS failed",
 					      flow->tc_id);
-				return ret;
+				goto end_flow_set;
 			}
 
 			dist_size = rss_conf->queue_num;
@@ -3519,22 +3965,22 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 								   dist_size,
 								   true);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
 				ret = dpaa2_configure_qos_table(priv, true);
 				if (ret)
-					return ret;
+					goto end_flow_set;
 			}
 
 			ret = dpaa2_flow_add_qos_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			ret = dpaa2_flow_add_fs_rule(priv, flow);
 			if (ret)
-				return ret;
+				goto end_flow_set;
 
 			break;
 		case RTE_FLOW_ACTION_TYPE_PF:
@@ -3551,6 +3997,7 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 		j++;
 	}
 
+end_flow_set:
 	if (!ret) {
 		/* New rules are inserted. */
 		if (!curr) {
@@ -3561,6 +4008,10 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			LIST_INSERT_AFTER(curr, flow, next);
 		}
 	}
+
+	if (dpaa2_pattern)
+		rte_free(dpaa2_pattern);
+
 	return ret;
 }
 
-- 
2.25.1


  parent reply	other threads:[~2024-09-18  7:55 UTC|newest]

Thread overview: 88+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with its object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with its object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` vanshika.shukla [this message]
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240918075056.1838654-30-vanshika.shukla@nxp.com \
    --to=vanshika.shukla@nxp.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jun.yang@nxp.com \
    --cc=sachin.saxena@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).