DPDK patches and discussions
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v1 30/43] net/dpaa2: eCPRI support by parser result
Date: Fri, 13 Sep 2024 11:29:46 +0530	[thread overview]
Message-ID: <20240913055959.3246917-31-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240913055959.3246917-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

The soft parser extracts the eCPRI header and message fields into
specified areas of the parser result. Flows are then classified
according to the eCPRI extracts read from the parser result.
This implementation supports eCPRI over Ethernet/VLAN/UDP and the
various type/message combinations.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
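
A minimal usage sketch (not part of the patch): the rte_flow rule below
steers eCPRI IQ-data messages carrying a given PC_ID to an Rx queue.
The port ID, PC_ID value and queue index are illustrative assumptions.

#include <rte_flow.h>
#include <rte_ecpri.h>

/* Match eCPRI IQ-data (message type #0) with PC_ID 0x1234 and queue it. */
static struct rte_flow *
create_ecpri_iq_flow(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ecpri ecpri_spec = {
		.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
		.hdr.type0.pc_id = RTE_BE16(0x1234),
	};
	struct rte_flow_item_ecpri ecpri_mask = {
		.hdr.common.type = 0xff,
		.hdr.type0.pc_id = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_ECPRI,
		  .spec = &ecpri_spec, .mask = &ecpri_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;

	return rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
}

The same match can be requested from testpmd along the lines of
(flow command syntax assumed):

  flow create 0 ingress pattern eth / ecpri common type iq_data
       pc_id is 0x1234 / end actions queue index 1 / end
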
 drivers/net/dpaa2/dpaa2_ethdev.h |  18 ++
 drivers/net/dpaa2/dpaa2_flow.c   | 348 ++++++++++++++++++++++++++++++-
 2 files changed, 365 insertions(+), 1 deletion(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index aeddcfdfa9..eaa653d266 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -179,6 +179,8 @@ enum dpaa2_rx_faf_offset {
 	FAFE_VXLAN_IN_IPV6_FRAM = 2,
 	FAFE_VXLAN_IN_UDP_FRAM = 3,
 	FAFE_VXLAN_IN_TCP_FRAM = 4,
+
+	FAFE_ECPRI_FRAM = 7,
 	/* Set by SP end*/
 
 	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
@@ -207,6 +209,17 @@ enum dpaa2_rx_faf_offset {
 	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
 };
 
+enum dpaa2_ecpri_fafe_type {
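+	/*
+	 * Each value sets bit 0 (8 - FAFE_ECPRI_FRAM == 1) to flag an
+	 * eCPRI frame and carries the eCPRI message type in bits 1..3
+	 * (type << 1).
+	 */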
+	ECPRI_FAFE_TYPE_0 = (8 - FAFE_ECPRI_FRAM),
+	ECPRI_FAFE_TYPE_1 = (8 - FAFE_ECPRI_FRAM) | (1 << 1),
+	ECPRI_FAFE_TYPE_2 = (8 - FAFE_ECPRI_FRAM) | (2 << 1),
+	ECPRI_FAFE_TYPE_3 = (8 - FAFE_ECPRI_FRAM) | (3 << 1),
+	ECPRI_FAFE_TYPE_4 = (8 - FAFE_ECPRI_FRAM) | (4 << 1),
+	ECPRI_FAFE_TYPE_5 = (8 - FAFE_ECPRI_FRAM) | (5 << 1),
+	ECPRI_FAFE_TYPE_6 = (8 - FAFE_ECPRI_FRAM) | (6 << 1),
+	ECPRI_FAFE_TYPE_7 = (8 - FAFE_ECPRI_FRAM) | (7 << 1)
+};
+
 #define DPAA2_PR_ETH_OFF_OFFSET 19
 #define DPAA2_PR_TCI_OFF_OFFSET 21
 #define DPAA2_PR_LAST_ETYPE_OFFSET 23
@@ -236,6 +249,11 @@ enum dpaa2_rx_faf_offset {
 #define DPAA2_VXLAN_IN_TYPE_OFFSET 46
 /* Set by SP for vxlan distribution end*/
 
+/* eCPRI shares the SP context with VXLAN */
+#define DPAA2_ECPRI_MSG_OFFSET DPAA2_VXLAN_VNI_OFFSET
+
+#define DPAA2_ECPRI_MAX_EXTRACT_NB 8
+
 struct ipv4_sd_addr_extract_rule {
 	uint32_t ipv4_src;
 	uint32_t ipv4_dst;
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index e4d7117192..e4fffdbf33 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -156,6 +156,13 @@ static const struct rte_flow_item_vxlan dpaa2_flow_item_vxlan_mask = {
 	.flags = 0xff,
 	.vni = "\xff\xff\xff",
 };
+
+static const struct rte_flow_item_ecpri dpaa2_flow_item_ecpri_mask = {
+	.hdr.common.type = 0xff,
+	.hdr.dummy[0] = RTE_BE32(0xffffffff),
+	.hdr.dummy[1] = RTE_BE32(0xffffffff),
+	.hdr.dummy[2] = RTE_BE32(0xffffffff),
+};
 #endif
 
 #define DPAA2_FLOW_DUMP printf
@@ -1556,6 +1563,10 @@ dpaa2_flow_extract_support(const uint8_t *mask_src,
 		mask_support = (const char *)&dpaa2_flow_item_vxlan_mask;
 		size = sizeof(struct rte_flow_item_vxlan);
 		break;
+	case RTE_FLOW_ITEM_TYPE_ECPRI:
+		mask_support = (const char *)&dpaa2_flow_item_ecpri_mask;
+		size = sizeof(struct rte_flow_item_ecpri);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3238,6 +3249,330 @@ dpaa2_configure_flow_vxlan(struct dpaa2_dev_flow *flow,
 	return 0;
 }
 
+static int
+dpaa2_configure_flow_ecpri(struct dpaa2_dev_flow *flow,
+	struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_dpaa2_flow_item *dpaa2_pattern,
+	const struct rte_flow_action actions[] __rte_unused,
+	struct rte_flow_error *error __rte_unused,
+	int *device_configured)
+{
+	int ret, local_cfg = 0;
+	uint32_t group;
+	const struct rte_flow_item_ecpri *spec, *mask;
+	struct rte_flow_item_ecpri local_mask;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *pattern =
+		&dpaa2_pattern->generic_item;
+	uint8_t extract_nb = 0, i;
+	uint64_t rule_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint64_t mask_data[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint8_t extract_size[DPAA2_ECPRI_MAX_EXTRACT_NB];
+	uint8_t extract_off[DPAA2_ECPRI_MAX_EXTRACT_NB];
+
+	group = attr->group;
+
+	/* Parse pattern list to get the matching parameters */
+	spec = pattern->spec;
+	if (pattern->mask) {
+		memcpy(&local_mask, pattern->mask,
+			sizeof(struct rte_flow_item_ecpri));
+		local_mask.hdr.common.u32 =
+			rte_be_to_cpu_32(local_mask.hdr.common.u32);
+		mask = &local_mask;
+	} else {
+		mask = &dpaa2_flow_item_ecpri_mask;
+	}
+
+	/* Get traffic class index and flow id to be configured */
+	flow->tc_id = group;
+	flow->tc_index = attr->priority;
+
+	if (dpaa2_pattern->in_tunnel) {
+		DPAA2_PMD_ERR("Tunnel-ECPRI distribution not supported");
+		return -ENOTSUP;
+	}
+
+	if (!spec) {
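+		/*
+		 * No spec provided: identify any eCPRI frame by the
+		 * FAFE bit set by the soft parser, for both QoS and
+		 * FS classification.
+		 */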
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_ECPRI_FRAM, DPAA2_FLOW_QOS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_identify_by_faf(priv, flow,
+			FAFE_ECPRI_FRAM, DPAA2_FLOW_FS_TYPE,
+			group, &local_cfg);
+		if (ret)
+			return ret;
+
+		(*device_configured) |= local_cfg;
+		return 0;
+	}
+
+	if (dpaa2_flow_extract_support((const uint8_t *)mask,
+		RTE_FLOW_ITEM_TYPE_ECPRI)) {
+		DPAA2_PMD_WARN("Extract field(s) of ECPRI not supported.");
+
+		return -1;
+	}
+
+	if (mask->hdr.common.type != 0xff) {
+		DPAA2_PMD_WARN("ECPRI header type not fully specified.");
+
+		return -1;
+	}
+
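+	/*
+	 * For each message type: first match the FAFE type byte written
+	 * by the soft parser, then match the requested message fields
+	 * placed in the parser-result area (layout shared with VXLAN).
+	 */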
+	if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_IQ_DATA) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_0;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type0.pc_id) {
+			rule_data[extract_nb] = spec->hdr.type0.pc_id;
+			mask_data[extract_nb] = mask->hdr.type0.pc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_iq_data, pc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type0.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type0.seq_id;
+			mask_data[extract_nb] = mask->hdr.type0.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_iq_data, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_BIT_SEQ) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_1;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type1.pc_id) {
+			rule_data[extract_nb] = spec->hdr.type1.pc_id;
+			mask_data[extract_nb] = mask->hdr.type1.pc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_bit_seq, pc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type1.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type1.seq_id;
+			mask_data[extract_nb] = mask->hdr.type1.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_bit_seq, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RTC_CTRL) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_2;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type2.rtc_id) {
+			rule_data[extract_nb] = spec->hdr.type2.rtc_id;
+			mask_data[extract_nb] = mask->hdr.type2.rtc_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_rtc_ctrl, rtc_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type2.seq_id) {
+			rule_data[extract_nb] = spec->hdr.type2.seq_id;
+			mask_data[extract_nb] = mask->hdr.type2.seq_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_rtc_ctrl, seq_id);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_GEN_DATA) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_3;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type3.pc_id || mask->hdr.type3.seq_id)
+			DPAA2_PMD_WARN("Extract of type3 msg not supported.");
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RM_ACC) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_4;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type4.rma_id) {
+			rule_data[extract_nb] = spec->hdr.type4.rma_id;
+			mask_data[extract_nb] = mask->hdr.type4.rma_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET + 0;
+				/* offsetof(struct rte_ecpri_msg_rm_access,
+				 * rma_id) cannot be used here: the
+				 * compiler does not support taking the
+				 * offset of a bit-field.
+				 */
+			extract_nb++;
+		}
+		if (mask->hdr.type4.ele_id) {
+			rule_data[extract_nb] = spec->hdr.type4.ele_id;
+			mask_data[extract_nb] = mask->hdr.type4.ele_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET + 2;
+				/* offsetof(struct rte_ecpri_msg_rm_access,
+				 * ele_id) cannot be used here for the
+				 * same bit-field reason.
+				 */
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_DLY_MSR) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_5;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type5.msr_id) {
+			rule_data[extract_nb] = spec->hdr.type5.msr_id;
+			mask_data[extract_nb] = mask->hdr.type5.msr_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_delay_measure,
+					msr_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type5.act_type) {
+			rule_data[extract_nb] = spec->hdr.type5.act_type;
+			mask_data[extract_nb] = mask->hdr.type5.act_type;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_delay_measure,
+					act_type);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_RMT_RST) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_6;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type6.rst_id) {
+			rule_data[extract_nb] = spec->hdr.type6.rst_id;
+			mask_data[extract_nb] = mask->hdr.type6.rst_id;
+			extract_size[extract_nb] = sizeof(rte_be16_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_remote_reset,
+					rst_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type6.rst_op) {
+			rule_data[extract_nb] = spec->hdr.type6.rst_op;
+			mask_data[extract_nb] = mask->hdr.type6.rst_op;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_remote_reset,
+					rst_op);
+			extract_nb++;
+		}
+	} else if (spec->hdr.common.type == RTE_ECPRI_MSG_TYPE_EVT_IND) {
+		rule_data[extract_nb] = ECPRI_FAFE_TYPE_7;
+		mask_data[extract_nb] = 0xff;
+		extract_size[extract_nb] = sizeof(uint8_t);
+		extract_off[extract_nb] = DPAA2_FAFE_PSR_OFFSET;
+		extract_nb++;
+
+		if (mask->hdr.type7.evt_id) {
+			rule_data[extract_nb] = spec->hdr.type7.evt_id;
+			mask_data[extract_nb] = mask->hdr.type7.evt_id;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+					evt_id);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.evt_type) {
+			rule_data[extract_nb] = spec->hdr.type7.evt_type;
+			mask_data[extract_nb] = mask->hdr.type7.evt_type;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+					evt_type);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.seq) {
+			rule_data[extract_nb] = spec->hdr.type7.seq;
+			mask_data[extract_nb] = mask->hdr.type7.seq;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+					seq);
+			extract_nb++;
+		}
+		if (mask->hdr.type7.number) {
+			rule_data[extract_nb] = spec->hdr.type7.number;
+			mask_data[extract_nb] = mask->hdr.type7.number;
+			extract_size[extract_nb] = sizeof(uint8_t);
+			extract_off[extract_nb] =
+				DPAA2_ECPRI_MSG_OFFSET +
+				offsetof(struct rte_ecpri_msg_event_ind,
+					number);
+			extract_nb++;
+		}
+	} else {
+		DPAA2_PMD_ERR("Invalid eCPRI header type(%d)",
+				spec->hdr.common.type);
+		return -EINVAL;
+	}
+
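+	/*
+	 * Program every extract into both the QoS table (TC selection)
+	 * and the FS table (flow steering within the TC).
+	 */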
+	for (i = 0; i < extract_nb; i++) {
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			extract_off[i],
+			extract_size[i], &rule_data[i], &mask_data[i],
+			priv, group,
+			device_configured,
+			DPAA2_FLOW_QOS_TYPE);
+		if (ret)
+			return ret;
+
+		ret = dpaa2_flow_add_pr_extract_rule(flow,
+			extract_off[i],
+			extract_size[i], &rule_data[i], &mask_data[i],
+			priv, group,
+			device_configured,
+			DPAA2_FLOW_FS_TYPE);
+		if (ret)
+			return ret;
+	}
+
+	(*device_configured) |= local_cfg;
+
+	return 0;
+}
+
 static int
 dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
 	struct rte_eth_dev *dev,
@@ -3870,6 +4205,16 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 				goto end_flow_set;
 			}
 			break;
+		case RTE_FLOW_ITEM_TYPE_ECPRI:
+			ret = dpaa2_configure_flow_ecpri(flow,
+					dev, attr, &dpaa2_pattern[i],
+					actions, error,
+					&is_keycfg_configured);
+			if (ret) {
+				DPAA2_PMD_ERR("ECPRI flow config failed!");
+				goto end_flow_set;
+			}
+			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
 			ret = dpaa2_configure_flow_raw(flow, dev, attr,
 						       &dpaa2_pattern[i],
@@ -3884,7 +4229,8 @@ dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
 			end_of_list = 1;
 			break; /*End of List*/
 		default:
-			DPAA2_PMD_ERR("Invalid action type");
+			DPAA2_PMD_ERR("Invalid flow item[%d] type(%d)",
+				i, pattern[i].type);
 			ret = -ENOTSUP;
 			break;
 		}
-- 
2.25.1


Thread overview: 88+ messages
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` vanshika.shukla [this message]
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
