DPDK patches and discussions
 help / color / mirror / Atom feed
From: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
To: dev@dpdk.org, damodharam.ammepalli@broadcom.com
Cc: ajit.khaparde@broadcom.com, ferruh.yigit@amd.com,
	huangdengdui@huawei.com, kalesh-anakkur.purayil@broadcom.com
Subject: [PATCH v5 2/2] net/bnxt: code refactor for supporting speed lanes
Date: Wed,  4 Sep 2024 10:50:58 -0700	[thread overview]
Message-ID: <20240904175151.47780-3-damodharam.ammepalli@broadcom.com> (raw)
In-Reply-To: <20240904175151.47780-1-damodharam.ammepalli@broadcom.com>

Broadcom Thor2 NICs support link mode settings where the user
can configure a fixed speed and the associated supported number of
lanes. This patch refactors the code to address the proposed
poll mode library design updates.

Signed-off-by: Damodharam Ammepalli <damodharam.ammepalli@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   3 +
 drivers/net/bnxt/bnxt_ethdev.c | 182 ++++++++++++++++++++++++++++++---
 drivers/net/bnxt/bnxt_hwrm.c   |  40 ++++++--
 3 files changed, 206 insertions(+), 19 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index aaa7ea00cc..667fc84eb2 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -328,6 +328,7 @@ struct bnxt_link_info {
 	uint16_t                cfg_auto_link_speeds2_mask;
 	uint8_t                 active_lanes;
 	uint8_t			option_flags;
+	uint16_t                pmd_speed_lanes;
 };
 
 #define BNXT_COS_QUEUE_COUNT	8
@@ -1219,6 +1220,7 @@ extern int bnxt_logtype_driver;
 #define BNXT_LINK_SPEEDS_V2_VF(bp) (BNXT_VF((bp)) && ((bp)->link_info->option_flags))
 #define BNXT_LINK_SPEEDS_V2(bp) (((bp)->link_info) && (((bp)->link_info->support_speeds_v2) || \
 						       BNXT_LINK_SPEEDS_V2_VF((bp))))
+#define BNXT_MAX_SPEED_LANES 8
 extern const struct rte_flow_ops bnxt_ulp_rte_flow_ops;
 int32_t bnxt_ulp_port_init(struct bnxt *bp);
 void bnxt_ulp_port_deinit(struct bnxt *bp);
@@ -1244,4 +1246,5 @@ int bnxt_flow_meter_ops_get(struct rte_eth_dev *eth_dev, void *arg);
 struct bnxt_vnic_info *bnxt_get_default_vnic(struct bnxt *bp);
 struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type);
 uint64_t bnxt_eth_rss_support(struct bnxt *bp);
+uint16_t bnxt_parse_eth_link_speed_v2(struct bnxt *bp);
 #endif
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index e63febe782..9bd26c5149 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -122,6 +122,30 @@ static const char *const bnxt_dev_args[] = {
 	NULL
 };
 
+#define BNXT_SPEEDS_SUPP_SPEED_LANES (RTE_ETH_LINK_SPEED_10G | \
+				      RTE_ETH_LINK_SPEED_25G | \
+				      RTE_ETH_LINK_SPEED_40G | \
+				      RTE_ETH_LINK_SPEED_50G | \
+				      RTE_ETH_LINK_SPEED_100G | \
+				      RTE_ETH_LINK_SPEED_200G | \
+				      RTE_ETH_LINK_SPEED_400G)
+
+static const struct rte_eth_speed_lanes_capa speed_lanes_capa_tbl[] = {
+	{ RTE_ETH_SPEED_NUM_10G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_1) },
+	{ RTE_ETH_SPEED_NUM_25G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_1) },
+
+	{ RTE_ETH_SPEED_NUM_40G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_4) },
+	{ RTE_ETH_SPEED_NUM_50G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_1) |
+		RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_2) },
+	{ RTE_ETH_SPEED_NUM_100G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_1) |
+		RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_2) |
+			RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_4) },
+	{ RTE_ETH_SPEED_NUM_200G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_2) |
+		RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_4) },
+	{ RTE_ETH_SPEED_NUM_400G, RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_4) |
+		RTE_ETH_SPEED_LANES_CAPA_MASK(LANE_8) },
+};
+
 /*
  * cqe-mode = an non-negative 8-bit number
  */
@@ -696,22 +720,50 @@ static inline bool bnxt_force_link_config(struct bnxt *bp)
 	}
 }
 
-static int bnxt_update_phy_setting(struct bnxt *bp)
+static int bnxt_validate_speed_lanes_change(struct bnxt *bp)
 {
 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
 	struct rte_eth_link *link = &bp->eth_dev->data->dev_link;
-	struct rte_eth_link new;
 	uint32_t curr_speed_bit;
 	int rc;
 
+	/* Check if speed x lanes combo is supported */
+	if (dev_conf->link_speeds)  {
+		rc = bnxt_parse_eth_link_speed_v2(bp);
+		if (rc == 0)
+			return -EINVAL;
+	}
+
+	/* convert to speedbit flag */
+	curr_speed_bit = rte_eth_speed_bitflag((uint32_t)link->link_speed, 1);
+
+	/* check if speed and lanes have changed */
+	if (dev_conf->link_speeds != curr_speed_bit ||
+	    bp->link_info->active_lanes != bp->link_info->pmd_speed_lanes)
+		return 1;
+
+	return 0;
+}
+
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+	struct rte_eth_link new;
+	int rc, rc1 = 0;
+
 	rc = bnxt_get_hwrm_link_config(bp, &new);
 	if (rc) {
 		PMD_DRV_LOG(ERR, "Failed to get link settings\n");
 		return rc;
 	}
 
-	/* convert to speedbit flag */
-	curr_speed_bit = rte_eth_speed_bitflag((uint32_t)link->link_speed, 1);
+	/* Validate speeds2 requirements */
+	if (BNXT_LINK_SPEEDS_V2(bp)) {
+		rc1 = bnxt_validate_speed_lanes_change(bp);
+		if (rc1 == -EINVAL) {
+			PMD_DRV_LOG(ERR, "Failed to set correct lanes\n");
+			return rc1;
+		}
+	}
 
 	/*
 	 * Device is not obliged link down in certain scenarios, even
@@ -719,8 +771,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
 	 * to shutdown the port, bnxt_get_hwrm_link_config() call always
 	 * returns link up. Force phy update always in that case.
 	 */
-	if (!new.link_status || bnxt_force_link_config(bp) ||
-	    (BNXT_LINK_SPEEDS_V2(bp) && dev_conf->link_speeds != curr_speed_bit)) {
+	if (!new.link_status || bnxt_force_link_config(bp) || rc1 == 1) {
 		rc = bnxt_set_hwrm_link_config(bp, true);
 		if (rc) {
 			PMD_DRV_LOG(ERR, "Failed to update PHY settings\n");
@@ -1331,16 +1382,17 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 {
 	struct rte_eth_link *link = &eth_dev->data->dev_link;
+	struct bnxt *bp = eth_dev->data->dev_private;
 
 	if (link->link_status)
-		PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s\n",
-			eth_dev->data->port_id,
-			(uint32_t)link->link_speed,
-			(link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
-			("full-duplex") : ("half-duplex\n"));
+		PMD_DRV_LOG(DEBUG, "Port %d Link Up - speed %u Mbps - %s Lanes - %d\n",
+			    eth_dev->data->port_id,
+			    (uint32_t)link->link_speed,
+			    (link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
+			    ("full-duplex") : ("half-duplex\n"),
+			    (uint16_t)bp->link_info->active_lanes);
 	else
-		PMD_DRV_LOG(INFO, "Port %d Link Down\n",
-			eth_dev->data->port_id);
+		PMD_DRV_LOG(INFO, "Port %d Link Down\n", eth_dev->data->port_id);
 }
 
 /*
@@ -4191,6 +4243,105 @@ static int bnxt_get_module_eeprom(struct rte_eth_dev *dev,
 	return length ? -EINVAL : 0;
 }
 
+#if (RTE_VERSION_NUM(22, 11, 0, 0) <= RTE_VERSION)
+static int bnxt_speed_lanes_set(struct rte_eth_dev *dev, uint32_t speed_lanes)
+{
+	struct bnxt *bp = dev->data->dev_private;
+
+	if (!BNXT_LINK_SPEEDS_V2(bp))
+		return -ENOTSUP;
+
+	bp->link_info->pmd_speed_lanes = speed_lanes;
+
+	return 0;
+}
+
+static uint32_t
+bnxt_get_speed_lanes_capa(struct rte_eth_speed_lanes_capa *speed_lanes_capa,
+			  uint32_t speed_capa)
+{
+	uint32_t speed_bit;
+	uint32_t num = 0;
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(speed_lanes_capa_tbl); i++) {
+		speed_bit =
+			rte_eth_speed_bitflag(speed_lanes_capa_tbl[i].speed,
+					      RTE_ETH_LINK_FULL_DUPLEX);
+		if ((speed_capa & speed_bit) == 0)
+			continue;
+
+		speed_lanes_capa[num].speed = speed_lanes_capa_tbl[i].speed;
+		speed_lanes_capa[num].capa = speed_lanes_capa_tbl[i].capa;
+		num++;
+	}
+
+	return num;
+}
+
+static int bnxt_speed_lanes_get_capa(struct rte_eth_dev *dev,
+				     struct rte_eth_speed_lanes_capa *speed_lanes_capa,
+				     unsigned int num)
+{
+	struct rte_eth_link *link = &dev->data->dev_link;
+	struct bnxt *bp = dev->data->dev_private;
+	unsigned int speed_num;
+	uint32_t speed_capa;
+	int rc;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	if (!BNXT_LINK_SPEEDS_V2(bp))
+		return -ENOTSUP;
+
+	/* speed_num counts number of speed capabilities.
+	 * When link is down, show the user choice all combinations of speeds x lanes
+	 */
+	if (link->link_status) {
+		speed_capa = bnxt_get_speed_capabilities_v2(bp);
+		speed_num = rte_popcount32(speed_capa & BNXT_SPEEDS_SUPP_SPEED_LANES);
+	} else {
+		speed_capa = BNXT_SPEEDS_SUPP_SPEED_LANES;
+		speed_num = rte_popcount32(BNXT_SPEEDS_SUPP_SPEED_LANES);
+	}
+	if (speed_num == 0)
+		return -ENOTSUP;
+
+	if (speed_lanes_capa == NULL)
+		return speed_num;
+
+	if (num < speed_num)
+		return -EINVAL;
+
+	return bnxt_get_speed_lanes_capa(speed_lanes_capa, speed_capa);
+}
+
+static int bnxt_speed_lanes_get(struct rte_eth_dev *dev, uint32_t *lanes)
+{
+	struct rte_eth_link *link = &dev->data->dev_link;
+	struct bnxt *bp = dev->data->dev_private;
+	int rc;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
+
+	if (!BNXT_LINK_SPEEDS_V2(bp))
+		return -ENOTSUP;
+
+	if (!link->link_status)
+		return -EINVAL;
+
+	 /* user app expects lanes 1 for zero */
+	*lanes = (bp->link_info->active_lanes) ?
+		bp->link_info->active_lanes : 1;
+	return 0;
+}
+
+#endif
+
 /*
  * Initialization
  */
@@ -4262,6 +4413,11 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
 	.timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
 	.mtr_ops_get = bnxt_flow_meter_ops_get,
+#if (RTE_VERSION_NUM(22, 11, 0, 0) <= RTE_VERSION)
+	.speed_lanes_get = bnxt_speed_lanes_get,
+	.speed_lanes_set = bnxt_speed_lanes_set,
+	.speed_lanes_get_capa = bnxt_speed_lanes_get_capa,
+#endif
 };
 
 static uint32_t bnxt_map_reset_regs(struct bnxt *bp, uint32_t reg)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index fc142672f6..bb9032b4f8 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -72,6 +72,7 @@ struct link_speeds2_tbl {
 	uint32_t rte_speed_num;
 	uint16_t hwrm_speed;
 	uint16_t sig_mode;
+	uint16_t lanes;
 	const char *desc;
 } link_speeds2_tbl[] = {
 	{
@@ -81,6 +82,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_1G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB,
 		BNXT_SIG_MODE_NRZ,
+		1,
 		"1Gb NRZ",
 	}, {
 		100,
@@ -89,6 +91,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_10G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB,
 		BNXT_SIG_MODE_NRZ,
+		1,
 		"10Gb NRZ",
 	}, {
 		250,
@@ -97,6 +100,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_25G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB,
 		BNXT_SIG_MODE_NRZ,
+		1,
 		"25Gb NRZ",
 	}, {
 		400,
@@ -105,6 +109,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_40G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB,
 		BNXT_SIG_MODE_NRZ,
+		4,
 		"40Gb NRZ",
 	}, {
 		500,
@@ -113,6 +118,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_50G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB,
 		BNXT_SIG_MODE_NRZ,
+		2,
 		"50Gb NRZ",
 	}, {
 		1000,
@@ -121,6 +127,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_100G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB,
 		BNXT_SIG_MODE_NRZ,
+		4,
 		"100Gb NRZ",
 	}, {
 		501,
@@ -129,6 +136,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_50G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56,
 		BNXT_SIG_MODE_PAM4,
+		1,
 		"50Gb (PAM4-56: 50G per lane)",
 	}, {
 		1001,
@@ -137,6 +145,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_100G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56,
 		BNXT_SIG_MODE_PAM4,
+		2,
 		"100Gb (PAM4-56: 50G per lane)",
 	}, {
 		2001,
@@ -145,6 +154,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_200G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56,
 		BNXT_SIG_MODE_PAM4,
+		4,
 		"200Gb (PAM4-56: 50G per lane)",
 	}, {
 		4001,
@@ -153,6 +163,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_400G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56,
 		BNXT_SIG_MODE_PAM4,
+		8,
 		"400Gb (PAM4-56: 50G per lane)",
 	}, {
 		1002,
@@ -161,6 +172,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_100G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112,
 		BNXT_SIG_MODE_PAM4_112,
+		1,
 		"100Gb (PAM4-112: 100G per lane)",
 	}, {
 		2002,
@@ -169,6 +181,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_200G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112,
 		BNXT_SIG_MODE_PAM4_112,
+		2,
 		"200Gb (PAM4-112: 100G per lane)",
 	}, {
 		4002,
@@ -177,6 +190,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_400G,
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112,
 		BNXT_SIG_MODE_PAM4_112,
+		4,
 		"400Gb (PAM4-112: 100G per lane)",
 	}, {
 		0,
@@ -185,6 +199,7 @@ struct link_speeds2_tbl {
 		RTE_ETH_SPEED_NUM_NONE,	/* None matches, No speed */
 		HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB, /* Placeholder for wrong HWRM */
 		BNXT_SIG_MODE_NRZ, /* default sig */
+		0,
 		"Unknown",
 	},
 };
@@ -264,17 +279,29 @@ static const char *bnxt_get_xcvr_type(uint32_t xcvr_identifier_type_tx_lpi_timer
 /* Utility function to lookup speeds2 table and
  * return a rte to hwrm speed matching row to the client
  */
-static
-struct link_speeds2_tbl *bnxt_get_rte_hwrm_speeds2_entry(uint32_t speed)
+static struct link_speeds2_tbl *bnxt_get_rte_hwrm_speeds2_entry(struct bnxt *bp)
 {
 	int i, max;
+	uint32_t speed, lanes;
+	bool check_lanes;
+	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+
+	speed = dev_conf->link_speeds;
+	lanes = bp->link_info->pmd_speed_lanes;
 
 	max = BNXT_SPEEDS2_TBL_SZ - 1;
 	speed &= ~RTE_ETH_LINK_SPEED_FIXED;
+	check_lanes = !(lanes == 0);
+
 	for (i = 0; i < max; i++) {
-		if (speed == link_speeds2_tbl[i].rte_speed)
+		if (speed == link_speeds2_tbl[i].rte_speed &&
+		    (lanes == link_speeds2_tbl[i].lanes || !check_lanes))
 			break;
 	}
+
+	if (!check_lanes)
+		PMD_DRV_LOG(INFO, "Given lanes %d, Configuring default lanes %d %s\n",
+			    lanes, link_speeds2_tbl[i].lanes, link_speeds2_tbl[i].desc);
 	return (struct link_speeds2_tbl *)&link_speeds2_tbl[i];
 }
 
@@ -3579,11 +3606,11 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
 	return !conf_link;
 }
 
-static uint16_t bnxt_parse_eth_link_speed_v2(uint32_t conf_link_speed)
+uint16_t bnxt_parse_eth_link_speed_v2(struct bnxt *bp)
 {
 	/* get bitmap value based on speed */
 	return ((struct link_speeds2_tbl *)
-		bnxt_get_rte_hwrm_speeds2_entry(conf_link_speed))->force_val;
+		bnxt_get_rte_hwrm_speeds2_entry(bp))->force_val;
 }
 
 static uint16_t bnxt_parse_eth_link_speed(struct bnxt *bp, uint32_t conf_link_speed,
@@ -3598,7 +3625,7 @@ static uint16_t bnxt_parse_eth_link_speed(struct bnxt *bp, uint32_t conf_link_sp
 
 	/* Handle P7 chips saperately. It got enhanced phy attribs to choose from */
 	if (BNXT_LINK_SPEEDS_V2(bp))
-		return bnxt_parse_eth_link_speed_v2(conf_link_speed);
+		return bnxt_parse_eth_link_speed_v2(bp);
 
 	switch (conf_link_speed & ~RTE_ETH_LINK_SPEED_FIXED) {
 	case RTE_ETH_LINK_SPEED_100M:
@@ -3902,6 +3929,7 @@ static int bnxt_hwrm_port_phy_cfg_v2(struct bnxt *bp, struct bnxt_link_info *con
 	} else {
 		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2;
 		req.force_link_speeds2 = rte_cpu_to_le_16(conf->link_speed);
+		PMD_DRV_LOG(INFO, "Force speed %d\n", conf->link_speed);
 	}
 
 	/* Fill rest of the req message */
-- 
2.43.5


-- 
This electronic communication and the information and any files transmitted 
with it, or attached to it, are confidential and are intended solely for 
the use of the individual or entity to whom it is addressed and may contain 
information that is confidential, legally privileged, protected by privacy 
laws, or otherwise restricted from disclosure to anyone else. If you are 
not the intended recipient or the person responsible for delivering the 
e-mail to the intended recipient, you are hereby notified that any use, 
copying, distribution, dissemination, forwarding, or printing of 
this e-mail is strictly prohibited. If you received this e-mail in error, 
please return the e-mail to the sender, delete it from your computer, and 
destroy any printed copy of it.

      parent reply	other threads:[~2024-09-04 17:57 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-04 17:50 [PATCH v5 0/2] Add link_speed lanes support Damodharam Ammepalli
2024-09-04 17:50 ` [PATCH v5 1/2] ethdev: " Damodharam Ammepalli
2024-09-04 17:50 ` Damodharam Ammepalli [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240904175151.47780-3-damodharam.ammepalli@broadcom.com \
    --to=damodharam.ammepalli@broadcom.com \
    --cc=ajit.khaparde@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@amd.com \
    --cc=huangdengdui@huawei.com \
    --cc=kalesh-anakkur.purayil@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).