DPDK patches and discussions
 help / color / mirror / Atom feed
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support
Date: Wed, 18 Sep 2024 13:20:56 +0530	[thread overview]
Message-ID: <20240918075056.1838654-44-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240918075056.1838654-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

Support multiple extractions as well as hardware descriptions
instead of hard-coded values.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.h     |   1 +
 drivers/net/dpaa2/dpaa2_flow.c       |  22 --
 drivers/net/dpaa2/dpaa2_mux.c        | 395 ++++++++++++++++-----------
 drivers/net/dpaa2/dpaa2_parse_dump.h |   2 +
 drivers/net/dpaa2/rte_pmd_dpaa2.h    |   8 +-
 5 files changed, 247 insertions(+), 181 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index fd6bad7f74..fd3119247a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -198,6 +198,7 @@ enum dpaa2_rx_faf_offset {
 	FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
 	FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
 	FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IP_FRAG_FRAM = 50 + DPAA2_FAFE_PSR_SIZE * 8,
 	FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
 	FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
 	FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 54f38e2e25..9dd9163880 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -98,13 +98,6 @@ enum rte_flow_action_type dpaa2_supported_action_type[] = {
 	RTE_FLOW_ACTION_TYPE_RSS
 };
 
-static const
-enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
-	RTE_FLOW_ACTION_TYPE_QUEUE,
-	RTE_FLOW_ACTION_TYPE_PORT_ID,
-	RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
-};
-
 #ifndef __cplusplus
 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
 	.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@@ -4083,21 +4076,6 @@ dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	return 0;
 }
 
-static inline int
-dpaa2_fs_action_supported(enum rte_flow_action_type action)
-{
-	int i;
-	int action_num = sizeof(dpaa2_supported_fs_action_type) /
-		sizeof(enum rte_flow_action_type);
-
-	for (i = 0; i < action_num; i++) {
-		if (action == dpaa2_supported_fs_action_type[i])
-			return true;
-	}
-
-	return false;
-}
-
 static inline int
 dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
 	const struct rte_flow_attr *attr)
diff --git a/drivers/net/dpaa2/dpaa2_mux.c b/drivers/net/dpaa2/dpaa2_mux.c
index 5c37701939..79a1c7f981 100644
--- a/drivers/net/dpaa2/dpaa2_mux.c
+++ b/drivers/net/dpaa2/dpaa2_mux.c
@@ -32,8 +32,9 @@ struct dpaa2_dpdmux_dev {
 	uint8_t num_ifs;   /* Number of interfaces in DPDMUX */
 };
 
-struct rte_flow {
-	struct dpdmux_rule_cfg rule;
+#define DPAA2_MUX_FLOW_MAX_RULE_NUM 8
+struct dpaa2_mux_flow {
+	struct dpdmux_rule_cfg rule[DPAA2_MUX_FLOW_MAX_RULE_NUM];
 };
 
 TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
@@ -53,204 +54,287 @@ static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
 	return dpdmux_dev;
 }
 
-struct rte_flow *
+int
 rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
-			      struct rte_flow_item *pattern[],
-			      struct rte_flow_action *actions[])
+	struct rte_flow_item pattern[],
+	struct rte_flow_action actions[])
 {
 	struct dpaa2_dpdmux_dev *dpdmux_dev;
+	static struct dpkg_profile_cfg s_kg_cfg;
 	struct dpkg_profile_cfg kg_cfg;
 	const struct rte_flow_action_vf *vf_conf;
 	struct dpdmux_cls_action dpdmux_action;
-	struct rte_flow *flow = NULL;
-	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
+	uint8_t *key_va = NULL, *mask_va = NULL;
+	void *key_cfg_va = NULL;
+	uint64_t key_iova, mask_iova, key_cfg_iova;
 	uint8_t key_size = 0;
-	int ret;
-	static int i;
+	int ret = 0, loop = 0;
+	static int s_i;
+	struct dpkg_extract *extract;
+	struct dpdmux_rule_cfg rule;
 
-	if (!pattern || !actions || !pattern[0] || !actions[0])
-		return NULL;
+	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
 
 	/* Find the DPDMUX from dpdmux_id in our list */
 	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
 	if (!dpdmux_dev) {
 		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
-		return NULL;
+		ret = -ENODEV;
+		goto creation_error;
 	}
 
-	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
-				   RTE_CACHE_LINE_SIZE);
-	if (!key_cfg_iova) {
-		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
-		return NULL;
+	key_cfg_va = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
+				RTE_CACHE_LINE_SIZE);
+	if (!key_cfg_va) {
+		DPAA2_PMD_ERR("Unable to allocate key configure buffer");
+		ret = -ENOMEM;
+		goto creation_error;
+	}
+
+	key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_va,
+		DIST_PARAM_IOVA_SIZE);
+	if (key_cfg_iova == RTE_BAD_IOVA) {
+		DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+			__func__, key_cfg_va);
+		ret = -ENOBUFS;
+		goto creation_error;
 	}
-	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
-			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
-	if (!flow) {
-		DPAA2_PMD_ERR(
-			"Memory allocation failure for rule configuration\n");
+
+	key_va = rte_zmalloc(NULL, (2 * DIST_PARAM_IOVA_SIZE),
+		RTE_CACHE_LINE_SIZE);
+	if (!key_va) {
+		DPAA2_PMD_ERR("Unable to allocate flow dist parameter");
+		ret = -ENOMEM;
 		goto creation_error;
 	}
-	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
-	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
+
+	key_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_va,
+		(2 * DIST_PARAM_IOVA_SIZE));
+	if (key_iova == RTE_BAD_IOVA) {
+		DPAA2_PMD_ERR("%s: No IOMMU mapping for address(%p)",
+			__func__, key_va);
+		ret = -ENOBUFS;
+		goto creation_error;
+	}
+
+	mask_va = key_va + DIST_PARAM_IOVA_SIZE;
+	mask_iova = key_iova + DIST_PARAM_IOVA_SIZE;
 
 	/* Currently taking only IP protocol as an extract type.
-	 * This can be extended to other fields using pattern->type.
+	 * This can be extended to other fields using pattern->type.
 	 */
 	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
 
-	switch (pattern[0]->type) {
-	case RTE_FLOW_ITEM_TYPE_IPV4:
-	{
-		const struct rte_flow_item_ipv4 *spec;
-
-		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
-		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
-		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
-		kg_cfg.num_extracts = 1;
-
-		spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
-		memcpy(key_iova, (const void *)(&spec->hdr.next_proto_id),
-			sizeof(uint8_t));
-		memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t));
-		key_size = sizeof(uint8_t);
-	}
-	break;
-
-	case RTE_FLOW_ITEM_TYPE_VLAN:
-	{
-		const struct rte_flow_item_vlan *spec;
-
-		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
-		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
-		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FROM_FIELD;
-		kg_cfg.extracts[0].extract.from_hdr.offset = 1;
-		kg_cfg.extracts[0].extract.from_hdr.size = 1;
-		kg_cfg.num_extracts = 1;
-
-		spec = (const struct rte_flow_item_vlan *)pattern[0]->spec;
-		memcpy((void *)key_iova, (const void *)(&spec->hdr.vlan_tci),
-			sizeof(uint16_t));
-		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
-		key_size = sizeof(uint16_t);
-	}
-	break;
-
-	case RTE_FLOW_ITEM_TYPE_UDP:
-	{
-		const struct rte_flow_item_udp *spec;
-		uint16_t udp_dst_port;
-
-		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
-		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
-		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
-		kg_cfg.num_extracts = 1;
-
-		spec = (const struct rte_flow_item_udp *)pattern[0]->spec;
-		udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port);
-		memcpy((void *)key_iova, (const void *)&udp_dst_port,
-							sizeof(rte_be16_t));
-		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
-		key_size = sizeof(uint16_t);
-	}
-	break;
-
-	case RTE_FLOW_ITEM_TYPE_ETH:
-	{
-		const struct rte_flow_item_eth *spec;
-		uint16_t eth_type;
-
-		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
-		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
-		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
-		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
-		kg_cfg.num_extracts = 1;
-
-		spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
-		eth_type = rte_constant_bswap16(spec->hdr.ether_type);
-		memcpy((void *)key_iova, (const void *)&eth_type,
-							sizeof(rte_be16_t));
-		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
-		key_size = sizeof(uint16_t);
-	}
-	break;
-
-	case RTE_FLOW_ITEM_TYPE_RAW:
-	{
-		const struct rte_flow_item_raw *spec;
-
-		spec = (const struct rte_flow_item_raw *)pattern[0]->spec;
-		kg_cfg.extracts[0].extract.from_data.offset = spec->offset;
-		kg_cfg.extracts[0].extract.from_data.size = spec->length;
-		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
-		kg_cfg.num_extracts = 1;
-		memcpy((void *)key_iova, (const void *)spec->pattern,
-							spec->length);
-		memcpy(mask_iova, pattern[0]->mask, spec->length);
-
-		key_size = spec->length;
-	}
-	break;
+	while (pattern[loop].type != RTE_FLOW_ITEM_TYPE_END) {
+		if (kg_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+			DPAA2_PMD_ERR("Too many extracts(%d)",
+				kg_cfg.num_extracts);
+			ret = -ENOTSUP;
+			goto creation_error;
+		}
+		switch (pattern[loop].type) {
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		{
+			const struct rte_flow_item_ipv4 *spec;
+			const struct rte_flow_item_ipv4 *mask;
+
+			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+			extract->type = DPKG_EXTRACT_FROM_HDR;
+			extract->extract.from_hdr.prot = NET_PROT_IP;
+			extract->extract.from_hdr.field = NH_FLD_IP_PROTO;
+			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+
+			kg_cfg.num_extracts++;
+
+			spec = pattern[loop].spec;
+			mask = pattern[loop].mask;
+			rte_memcpy(&key_va[key_size],
+				&spec->hdr.next_proto_id, sizeof(uint8_t));
+			if (mask) {
+				rte_memcpy(&mask_va[key_size],
+					&mask->hdr.next_proto_id,
+					sizeof(uint8_t));
+			} else {
+				mask_va[key_size] = 0xff;
+			}
+			key_size += sizeof(uint8_t);
+		}
+		break;
+
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+		{
+			const struct rte_flow_item_vlan *spec;
+			const struct rte_flow_item_vlan *mask;
+
+			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+			extract->type = DPKG_EXTRACT_FROM_HDR;
+			extract->extract.from_hdr.prot = NET_PROT_VLAN;
+			extract->extract.from_hdr.field = NH_FLD_VLAN_TCI;
+			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+
+			kg_cfg.num_extracts++;
+
+			spec = pattern[loop].spec;
+			mask = pattern[loop].mask;
+			rte_memcpy(&key_va[key_size],
+				&spec->tci, sizeof(uint16_t));
+			if (mask) {
+				rte_memcpy(&mask_va[key_size],
+					&mask->tci, sizeof(uint16_t));
+			} else {
+				memset(&mask_va[key_size], 0xff,
+					sizeof(rte_be16_t));
+			}
+			key_size += sizeof(uint16_t);
+		}
+		break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+		{
+			const struct rte_flow_item_udp *spec;
+			const struct rte_flow_item_udp *mask;
+
+			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+			extract->type = DPKG_EXTRACT_FROM_HDR;
+			extract->extract.from_hdr.prot = NET_PROT_UDP;
+			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+			extract->extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
+			kg_cfg.num_extracts++;
+
+			spec = pattern[loop].spec;
+			mask = pattern[loop].mask;
+			rte_memcpy(&key_va[key_size],
+				&spec->hdr.dst_port, sizeof(rte_be16_t));
+			if (mask) {
+				rte_memcpy(&mask_va[key_size],
+					&mask->hdr.dst_port,
+					sizeof(rte_be16_t));
+			} else {
+				memset(&mask_va[key_size], 0xff,
+					sizeof(rte_be16_t));
+			}
+			key_size += sizeof(rte_be16_t);
+		}
+		break;
+
+		case RTE_FLOW_ITEM_TYPE_ETH:
+		{
+			const struct rte_flow_item_eth *spec;
+			const struct rte_flow_item_eth *mask;
+
+			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+			extract->type = DPKG_EXTRACT_FROM_HDR;
+			extract->extract.from_hdr.prot = NET_PROT_ETH;
+			extract->extract.from_hdr.type = DPKG_FULL_FIELD;
+			extract->extract.from_hdr.field = NH_FLD_ETH_TYPE;
+			kg_cfg.num_extracts++;
+
+			spec = pattern[loop].spec;
+			mask = pattern[loop].mask;
+			rte_memcpy(&key_va[key_size],
+				&spec->type, sizeof(rte_be16_t));
+			if (mask) {
+				rte_memcpy(&mask_va[key_size],
+					&mask->type, sizeof(rte_be16_t));
+			} else {
+				memset(&mask_va[key_size], 0xff,
+					sizeof(rte_be16_t));
+			}
+			key_size += sizeof(rte_be16_t);
+		}
+		break;
+
+		case RTE_FLOW_ITEM_TYPE_RAW:
+		{
+			const struct rte_flow_item_raw *spec;
+			const struct rte_flow_item_raw *mask;
+
+			spec = pattern[loop].spec;
+			mask = pattern[loop].mask;
+			extract = &kg_cfg.extracts[kg_cfg.num_extracts];
+			extract->type = DPKG_EXTRACT_FROM_DATA;
+			extract->extract.from_data.offset = spec->offset;
+			extract->extract.from_data.size = spec->length;
+			kg_cfg.num_extracts++;
+
+			rte_memcpy(&key_va[key_size],
+				spec->pattern, spec->length);
+			if (mask && mask->pattern) {
+				rte_memcpy(&mask_va[key_size],
+					mask->pattern, spec->length);
+			} else {
+				memset(&mask_va[key_size], 0xff, spec->length);
+			}
+
+			key_size += spec->length;
+		}
+		break;
 
-	default:
-		DPAA2_PMD_ERR("Not supported pattern type: %d",
-				pattern[0]->type);
-		goto creation_error;
+		default:
+			DPAA2_PMD_ERR("Not supported pattern[%d] type: %d",
+				loop, pattern[loop].type);
+			ret = -ENOTSUP;
+			goto creation_error;
+		}
+		loop++;
 	}
 
-	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
+	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_va);
 	if (ret) {
 		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
 		goto creation_error;
 	}
 
-	/* Multiple rules with same DPKG extracts (kg_cfg.extracts) like same
-	 * offset and length values in raw is supported right now. Different
-	 * values of kg_cfg may not work.
-	 */
-	if (i == 0) {
-		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
-					    dpdmux_dev->token,
-				(uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
+	if (!s_i) {
+		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux,
+				CMD_PRI_LOW, dpdmux_dev->token, key_cfg_iova);
 		if (ret) {
 			DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
-					ret);
+				ret);
+			goto creation_error;
+		}
+		rte_memcpy(&s_kg_cfg, &kg_cfg, sizeof(struct dpkg_profile_cfg));
+	} else {
+		if (memcmp(&s_kg_cfg, &kg_cfg,
+			sizeof(struct dpkg_profile_cfg))) {
+			DPAA2_PMD_ERR("%s: Single flow support only.",
+				__func__);
+			ret = -ENOTSUP;
 			goto creation_error;
 		}
 	}
-	/* As now our key extract parameters are set, let us configure
-	 * the rule.
-	 */
-	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
-	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
-	flow->rule.key_size = key_size;
-	flow->rule.entry_index = i++;
 
-	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
+	vf_conf = actions[0].conf;
 	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
-		DPAA2_PMD_ERR("Invalid destination id\n");
+		DPAA2_PMD_ERR("Invalid destination id(%d)", vf_conf->id);
 		goto creation_error;
 	}
 	dpdmux_action.dest_if = vf_conf->id;
 
-	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
-					  dpdmux_dev->token, &flow->rule,
-					  &dpdmux_action);
+	rule.key_iova = key_iova;
+	rule.mask_iova = mask_iova;
+	rule.key_size = key_size;
+	rule.entry_index = s_i;
+	s_i++;
+
+	/* As now our key extract parameters are set, let us configure
+	 * the rule.
+	 */
+	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux,
+			CMD_PRI_LOW, dpdmux_dev->token,
+			&rule, &dpdmux_action);
 	if (ret) {
-		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
-			      ret);
+		DPAA2_PMD_ERR("Add classification entry failed:err(%d)", ret);
 		goto creation_error;
 	}
 
-	return flow;
-
 creation_error:
-	rte_free((void *)key_cfg_iova);
-	rte_free((void *)flow);
-	return NULL;
+	if (key_cfg_va)
+		rte_free(key_cfg_va);
+	if (key_va)
+		rte_free(key_va);
+
+	return ret;
 }
 
 int
@@ -407,10 +491,11 @@ dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
 	PMD_INIT_FUNC_TRACE();
 
 	/* Allocate DPAA2 dpdmux handle */
-	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
+	dpdmux_dev = rte_zmalloc(NULL,
+		sizeof(struct dpaa2_dpdmux_dev), RTE_CACHE_LINE_SIZE);
 	if (!dpdmux_dev) {
 		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
-		return -1;
+		return -ENOMEM;
 	}
 
 	/* Open the dpdmux object */
diff --git a/drivers/net/dpaa2/dpaa2_parse_dump.h b/drivers/net/dpaa2/dpaa2_parse_dump.h
index f1cdc003de..78fd3b768c 100644
--- a/drivers/net/dpaa2/dpaa2_parse_dump.h
+++ b/drivers/net/dpaa2/dpaa2_parse_dump.h
@@ -105,6 +105,8 @@ dpaa2_print_faf(struct dpaa2_fapr_array *fapr)
 			faf_bits[i].name = "IPv4 1 Present";
 		else if (i == FAF_IPV6_FRAM)
 			faf_bits[i].name = "IPv6 1 Present";
+		else if (i == FAF_IP_FRAG_FRAM)
+			faf_bits[i].name = "IP fragment Present";
 		else if (i == FAF_UDP_FRAM)
 			faf_bits[i].name = "UDP Present";
 		else if (i == FAF_TCP_FRAM)
diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2.h b/drivers/net/dpaa2/rte_pmd_dpaa2.h
index f93af1c65f..237c3cd6e7 100644
--- a/drivers/net/dpaa2/rte_pmd_dpaa2.h
+++ b/drivers/net/dpaa2/rte_pmd_dpaa2.h
@@ -26,12 +26,12 @@
  *    Associated actions.
  *
  * @return
- *    A valid handle in case of success, NULL otherwise.
+ *    0 in case of success, otherwise failure.
  */
-struct rte_flow *
+int
 rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
-			      struct rte_flow_item *pattern[],
-			      struct rte_flow_action *actions[]);
+	struct rte_flow_item pattern[],
+	struct rte_flow_action actions[]);
 int
 rte_pmd_dpaa2_mux_flow_destroy(uint32_t dpdmux_id,
 	uint16_t entry_index);
-- 
2.25.1


      parent reply	other threads:[~2024-09-18  7:57 UTC|newest]

Thread overview: 88+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` vanshika.shukla [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240918075056.1838654-44-vanshika.shukla@nxp.com \
    --to=vanshika.shukla@nxp.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jun.yang@nxp.com \
    --cc=sachin.saxena@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).