DPDK patches and discussions
 help / color / mirror / Atom feed
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v1 25/43] net/dpaa2: dump Rx parser result
Date: Fri, 13 Sep 2024 11:29:41 +0530	[thread overview]
Message-ID: <20240913055959.3246917-26-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240913055959.3246917-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

export DPAA2_PRINT_RX_PARSER_RESULT=1 is used to dump
the Rx parser result and the frame attribute flags generated by
the hardware parser and the soft parser.
The parser results are converted to big endian, as described in the RM.
The areas set by the soft parser are dumped as well.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c     |   5 +
 drivers/net/dpaa2/dpaa2_ethdev.h     |  90 ++++++++++
 drivers/net/dpaa2/dpaa2_parse_dump.h | 248 +++++++++++++++++++++++++++
 drivers/net/dpaa2/dpaa2_rxtx.c       |   7 +
 4 files changed, 350 insertions(+)
 create mode 100644 drivers/net/dpaa2/dpaa2_parse_dump.h

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 533effd72b..000d7da85c 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -75,6 +75,8 @@ int dpaa2_timestamp_dynfield_offset = -1;
 /* Enable error queue */
 bool dpaa2_enable_err_queue;
 
+bool dpaa2_print_parser_result;
+
 #define MAX_NB_RX_DESC		11264
 int total_nb_rx_desc;
 
@@ -2727,6 +2729,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 		DPAA2_PMD_INFO("Enable error queue");
 	}
 
+	if (getenv("DPAA2_PRINT_RX_PARSER_RESULT"))
+		dpaa2_print_parser_result = 1;
+
 	/* Allocate memory for hardware structure for queues */
 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
 	if (ret) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index ea1c1b5117..c864859b3f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -19,6 +19,8 @@
 #include <mc/fsl_dpni.h>
 #include <mc/fsl_mc_sys.h>
 
+#include "base/dpaa2_hw_dpni_annot.h"
+
 #define DPAA2_MIN_RX_BUF_SIZE 512
 #define DPAA2_MAX_RX_PKT_LEN  10240 /*WRIOP support*/
 #define NET_DPAA2_PMD_DRIVER_NAME net_dpaa2
@@ -152,6 +154,88 @@ extern const struct rte_tm_ops dpaa2_tm_ops;
 
 extern bool dpaa2_enable_err_queue;
 
+extern bool dpaa2_print_parser_result;
+
+#define DPAA2_FAPR_SIZE \
+	(sizeof(struct dpaa2_annot_hdr) - \
+	offsetof(struct dpaa2_annot_hdr, word3))
+
+#define DPAA2_PR_NXTHDR_OFFSET 0
+
+#define DPAA2_FAFE_PSR_OFFSET 2
+#define DPAA2_FAFE_PSR_SIZE 2
+
+#define DPAA2_FAF_PSR_OFFSET 4
+#define DPAA2_FAF_PSR_SIZE 12
+
+#define DPAA2_FAF_TOTAL_SIZE \
+	(DPAA2_FAFE_PSR_SIZE + DPAA2_FAF_PSR_SIZE)
+
+/* Just most popular Frame attribute flags (FAF) here.*/
+enum dpaa2_rx_faf_offset {
+	/* Set by SP start*/
+	FAFE_VXLAN_IN_VLAN_FRAM = 0,
+	FAFE_VXLAN_IN_IPV4_FRAM = 1,
+	FAFE_VXLAN_IN_IPV6_FRAM = 2,
+	FAFE_VXLAN_IN_UDP_FRAM = 3,
+	FAFE_VXLAN_IN_TCP_FRAM = 4,
+	/* Set by SP end*/
+
+	FAF_GTP_PRIMED_FRAM = 1 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_PTP_FRAM = 3 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_VXLAN_FRAM = 4 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_ETH_FRAM = 10 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_LLC_SNAP_FRAM = 18 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_VLAN_FRAM = 21 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_PPPOE_PPP_FRAM = 25 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_MPLS_FRAM = 27 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_ARP_FRAM = 30 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IPV4_FRAM = 34 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IPV6_FRAM = 42 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IP_FRAM = 48 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_ICMP_FRAM = 57 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IGMP_FRAM = 58 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_GRE_FRAM = 65 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_UDP_FRAM = 70 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_TCP_FRAM = 72 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IPSEC_FRAM = 77 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IPSEC_ESP_FRAM = 78 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_IPSEC_AH_FRAM = 79 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_SCTP_FRAM = 81 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_DCCP_FRAM = 83 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_GTP_FRAM = 87 + DPAA2_FAFE_PSR_SIZE * 8,
+	FAF_ESP_FRAM = 89 + DPAA2_FAFE_PSR_SIZE * 8,
+};
+
+#define DPAA2_PR_ETH_OFF_OFFSET 19
+#define DPAA2_PR_TCI_OFF_OFFSET 21
+#define DPAA2_PR_LAST_ETYPE_OFFSET 23
+#define DPAA2_PR_L3_OFF_OFFSET 27
+#define DPAA2_PR_L4_OFF_OFFSET 30
+#define DPAA2_PR_L5_OFF_OFFSET 31
+#define DPAA2_PR_NXTHDR_OFF_OFFSET 34
+
+/* Set by SP for vxlan distribution start*/
+#define DPAA2_VXLAN_IN_TCI_OFFSET 16
+
+#define DPAA2_VXLAN_IN_DADDR0_OFFSET 20
+#define DPAA2_VXLAN_IN_DADDR1_OFFSET 22
+#define DPAA2_VXLAN_IN_DADDR2_OFFSET 24
+#define DPAA2_VXLAN_IN_DADDR3_OFFSET 25
+#define DPAA2_VXLAN_IN_DADDR4_OFFSET 26
+#define DPAA2_VXLAN_IN_DADDR5_OFFSET 28
+
+#define DPAA2_VXLAN_IN_SADDR0_OFFSET 29
+#define DPAA2_VXLAN_IN_SADDR1_OFFSET 32
+#define DPAA2_VXLAN_IN_SADDR2_OFFSET 33
+#define DPAA2_VXLAN_IN_SADDR3_OFFSET 35
+#define DPAA2_VXLAN_IN_SADDR4_OFFSET 41
+#define DPAA2_VXLAN_IN_SADDR5_OFFSET 42
+
+#define DPAA2_VXLAN_VNI_OFFSET 43
+#define DPAA2_VXLAN_IN_TYPE_OFFSET 46
+/* Set by SP for vxlan distribution end*/
+
 struct ipv4_sd_addr_extract_rule {
 	uint32_t ipv4_src;
 	uint32_t ipv4_dst;
@@ -197,7 +281,13 @@ enum ip_addr_extract_type {
 	IP_DST_SRC_EXTRACT
 };
 
+enum key_prot_type {
+	DPAA2_NET_PROT_KEY,
+	DPAA2_FAF_KEY
+};
+
 struct key_prot_field {
+	enum key_prot_type type;
 	enum net_prot prot;
 	uint32_t key_field;
 };
diff --git a/drivers/net/dpaa2/dpaa2_parse_dump.h b/drivers/net/dpaa2/dpaa2_parse_dump.h
new file mode 100644
index 0000000000..f1cdc003de
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_parse_dump.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ *   Copyright 2022 NXP
+ *
+ */
+
+#ifndef _DPAA2_PARSE_DUMP_H
+#define _DPAA2_PARSE_DUMP_H
+
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_pmd_dpaa2.h>
+
+#include <dpaa2_hw_pvt.h>
+#include "dpaa2_tm.h"
+
+#include <mc/fsl_dpni.h>
+#include <mc/fsl_mc_sys.h>
+
+#include "base/dpaa2_hw_dpni_annot.h"
+
+#define DPAA2_PR_PRINT printf
+
+struct dpaa2_faf_bit_info {
+	const char *name;
+	int position;
+};
+
+struct dpaa2_fapr_field_info {
+	const char *name;
+	uint16_t value;
+};
+
+struct dpaa2_fapr_array {
+	union {
+		uint64_t pr_64[DPAA2_FAPR_SIZE / 8];
+		uint8_t pr[DPAA2_FAPR_SIZE];
+	};
+};
+
+#define NEXT_HEADER_NAME "Next Header"
+#define ETH_OFF_NAME "ETH OFFSET"
+#define VLAN_TCI_OFF_NAME "VLAN TCI OFFSET"
+#define LAST_ENTRY_OFF_NAME "LAST ETYPE Offset"
+#define L3_OFF_NAME "L3 Offset"
+#define L4_OFF_NAME "L4 Offset"
+#define L5_OFF_NAME "L5 Offset"
+#define NEXT_HEADER_OFF_NAME "Next Header Offset"
+
+static const
+struct dpaa2_fapr_field_info support_dump_fields[] = {
+	{
+		.name = NEXT_HEADER_NAME,
+	},
+	{
+		.name = ETH_OFF_NAME,
+	},
+	{
+		.name = VLAN_TCI_OFF_NAME,
+	},
+	{
+		.name = LAST_ENTRY_OFF_NAME,
+	},
+	{
+		.name = L3_OFF_NAME,
+	},
+	{
+		.name = L4_OFF_NAME,
+	},
+	{
+		.name = L5_OFF_NAME,
+	},
+	{
+		.name = NEXT_HEADER_OFF_NAME,
+	}
+};
+
+static inline void
+dpaa2_print_faf(struct dpaa2_fapr_array *fapr)
+{
+	const int faf_bit_len = DPAA2_FAF_TOTAL_SIZE * 8;
+	struct dpaa2_faf_bit_info faf_bits[faf_bit_len];
+	int i, byte_pos, bit_pos, vxlan = 0, vxlan_vlan = 0;
+	struct rte_ether_hdr vxlan_in_eth;
+	uint16_t vxlan_vlan_tci;
+
+	for (i = 0; i < faf_bit_len; i++) {
+		faf_bits[i].position = i;
+		if (i == FAFE_VXLAN_IN_VLAN_FRAM)
+			faf_bits[i].name = "VXLAN VLAN Present";
+		else if (i == FAFE_VXLAN_IN_IPV4_FRAM)
+			faf_bits[i].name = "VXLAN IPV4 Present";
+		else if (i == FAFE_VXLAN_IN_IPV6_FRAM)
+			faf_bits[i].name = "VXLAN IPV6 Present";
+		else if (i == FAFE_VXLAN_IN_UDP_FRAM)
+			faf_bits[i].name = "VXLAN UDP Present";
+		else if (i == FAFE_VXLAN_IN_TCP_FRAM)
+			faf_bits[i].name = "VXLAN TCP Present";
+		else if (i == FAF_VXLAN_FRAM)
+			faf_bits[i].name = "VXLAN Present";
+		else if (i == FAF_ETH_FRAM)
+			faf_bits[i].name = "Ethernet MAC Present";
+		else if (i == FAF_VLAN_FRAM)
+			faf_bits[i].name = "VLAN 1 Present";
+		else if (i == FAF_IPV4_FRAM)
+			faf_bits[i].name = "IPv4 1 Present";
+		else if (i == FAF_IPV6_FRAM)
+			faf_bits[i].name = "IPv6 1 Present";
+		else if (i == FAF_UDP_FRAM)
+			faf_bits[i].name = "UDP Present";
+		else if (i == FAF_TCP_FRAM)
+			faf_bits[i].name = "TCP Present";
+		else
+			faf_bits[i].name = "Check RM for this unusual frame";
+	}
+
+	DPAA2_PR_PRINT("Frame Annotation Flags:\r\n");
+	for (i = 0; i < faf_bit_len; i++) {
+		byte_pos = i / 8 + DPAA2_FAFE_PSR_OFFSET;
+		bit_pos = i % 8;
+		if (fapr->pr[byte_pos] & (1 << (7 - bit_pos))) {
+			DPAA2_PR_PRINT("FAF bit %d : %s\r\n",
+				faf_bits[i].position, faf_bits[i].name);
+			if (i == FAF_VXLAN_FRAM)
+				vxlan = 1;
+		}
+	}
+
+	if (vxlan) {
+		vxlan_in_eth.dst_addr.addr_bytes[0] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR0_OFFSET];
+		vxlan_in_eth.dst_addr.addr_bytes[1] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR1_OFFSET];
+		vxlan_in_eth.dst_addr.addr_bytes[2] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR2_OFFSET];
+		vxlan_in_eth.dst_addr.addr_bytes[3] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR3_OFFSET];
+		vxlan_in_eth.dst_addr.addr_bytes[4] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR4_OFFSET];
+		vxlan_in_eth.dst_addr.addr_bytes[5] =
+			fapr->pr[DPAA2_VXLAN_IN_DADDR5_OFFSET];
+
+		vxlan_in_eth.src_addr.addr_bytes[0] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR0_OFFSET];
+		vxlan_in_eth.src_addr.addr_bytes[1] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR1_OFFSET];
+		vxlan_in_eth.src_addr.addr_bytes[2] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR2_OFFSET];
+		vxlan_in_eth.src_addr.addr_bytes[3] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR3_OFFSET];
+		vxlan_in_eth.src_addr.addr_bytes[4] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR4_OFFSET];
+		vxlan_in_eth.src_addr.addr_bytes[5] =
+			fapr->pr[DPAA2_VXLAN_IN_SADDR5_OFFSET];
+
+		vxlan_in_eth.ether_type =
+			fapr->pr[DPAA2_VXLAN_IN_TYPE_OFFSET];
+		vxlan_in_eth.ether_type =
+			vxlan_in_eth.ether_type << 8;
+		vxlan_in_eth.ether_type |=
+			fapr->pr[DPAA2_VXLAN_IN_TYPE_OFFSET + 1];
+
+		if (vxlan_in_eth.ether_type == RTE_ETHER_TYPE_VLAN)
+			vxlan_vlan = 1;
+		DPAA2_PR_PRINT("VXLAN inner eth:\r\n");
+		DPAA2_PR_PRINT("dst addr: ");
+		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+			if (i != 0)
+				DPAA2_PR_PRINT(":");
+			DPAA2_PR_PRINT("%02x",
+				vxlan_in_eth.dst_addr.addr_bytes[i]);
+		}
+		DPAA2_PR_PRINT("\r\n");
+		DPAA2_PR_PRINT("src addr: ");
+		for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
+			if (i != 0)
+				DPAA2_PR_PRINT(":");
+			DPAA2_PR_PRINT("%02x",
+				vxlan_in_eth.src_addr.addr_bytes[i]);
+		}
+		DPAA2_PR_PRINT("\r\n");
+		DPAA2_PR_PRINT("type: 0x%04x\r\n",
+			vxlan_in_eth.ether_type);
+		if (vxlan_vlan) {
+			vxlan_vlan_tci = fapr->pr[DPAA2_VXLAN_IN_TCI_OFFSET];
+			vxlan_vlan_tci = vxlan_vlan_tci << 8;
+			vxlan_vlan_tci |=
+				fapr->pr[DPAA2_VXLAN_IN_TCI_OFFSET + 1];
+
+			DPAA2_PR_PRINT("vlan tci: 0x%04x\r\n",
+				vxlan_vlan_tci);
+		}
+	}
+}
+
+static inline void
+dpaa2_print_parse_result(struct dpaa2_annot_hdr *annotation)
+{
+	struct dpaa2_fapr_array fapr;
+	struct dpaa2_fapr_field_info
+		fapr_fields[sizeof(support_dump_fields) /
+		sizeof(struct dpaa2_fapr_field_info)];
+	uint64_t len, i;
+
+	memcpy(&fapr, &annotation->word3, DPAA2_FAPR_SIZE);
+	for (i = 0; i < (DPAA2_FAPR_SIZE / 8); i++)
+		fapr.pr_64[i] = rte_cpu_to_be_64(fapr.pr_64[i]);
+
+	memcpy(fapr_fields, support_dump_fields,
+		sizeof(support_dump_fields));
+
+	for (i = 0;
+		i < sizeof(fapr_fields) /
+		sizeof(struct dpaa2_fapr_field_info);
+		i++) {
+		if (!strcmp(fapr_fields[i].name, NEXT_HEADER_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_NXTHDR_OFFSET];
+			fapr_fields[i].value = fapr_fields[i].value << 8;
+			fapr_fields[i].value |=
+				fapr.pr[DPAA2_PR_NXTHDR_OFFSET + 1];
+		} else if (!strcmp(fapr_fields[i].name, ETH_OFF_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_ETH_OFF_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, VLAN_TCI_OFF_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_TCI_OFF_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, LAST_ENTRY_OFF_NAME)) {
+			fapr_fields[i].value =
+				fapr.pr[DPAA2_PR_LAST_ETYPE_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, L3_OFF_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_L3_OFF_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, L4_OFF_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_L4_OFF_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, L5_OFF_NAME)) {
+			fapr_fields[i].value = fapr.pr[DPAA2_PR_L5_OFF_OFFSET];
+		} else if (!strcmp(fapr_fields[i].name, NEXT_HEADER_OFF_NAME)) {
+			fapr_fields[i].value =
+				fapr.pr[DPAA2_PR_NXTHDR_OFF_OFFSET];
+		}
+	}
+
+	len = sizeof(fapr_fields) / sizeof(struct dpaa2_fapr_field_info);
+	DPAA2_PR_PRINT("Parse Result:\r\n");
+	for (i = 0; i < len; i++) {
+		DPAA2_PR_PRINT("%21s : 0x%02x\r\n",
+			fapr_fields[i].name, fapr_fields[i].value);
+	}
+	dpaa2_print_faf(&fapr);
+}
+
+#endif
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 23f7c4132d..4bb785aa49 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -25,6 +25,7 @@
 #include "dpaa2_pmd_logs.h"
 #include "dpaa2_ethdev.h"
 #include "base/dpaa2_hw_dpni_annot.h"
+#include "dpaa2_parse_dump.h"
 
 static inline uint32_t __rte_hot
 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
@@ -57,6 +58,9 @@ dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
 	struct dpaa2_annot_hdr *annotation =
 			(struct dpaa2_annot_hdr *)hw_annot_addr;
 
+	if (unlikely(dpaa2_print_parser_result))
+		dpaa2_print_parse_result(annotation);
+
 	m->packet_type = RTE_PTYPE_UNKNOWN;
 	switch (frc) {
 	case DPAA2_PKT_TYPE_ETHER:
@@ -252,6 +256,9 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
 	else
 		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 
+	if (unlikely(dpaa2_print_parser_result))
+		dpaa2_print_parse_result(annotation);
+
 	if (dpaa2_enable_ts[mbuf->port]) {
 		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
 		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
-- 
2.25.1


  parent reply	other threads:[~2024-09-13  6:03 UTC|newest]

Thread overview: 88+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` vanshika.shukla [this message]
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240913055959.3246917-26-vanshika.shukla@nxp.com \
    --to=vanshika.shukla@nxp.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jun.yang@nxp.com \
    --cc=sachin.saxena@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).