DPDK patches and discussions
 help / color / mirror / Atom feed
From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, Mingjin Ye <mingjinx.ye@intel.com>,
	stable@dpdk.org, Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process
Date: Tue,  2 Jan 2024 10:52:10 +0000	[thread overview]
Message-ID: <20240102105211.788819-2-mingjinx.ye@intel.com> (raw)
In-Reply-To: <20240102105211.788819-1-mingjinx.ye@intel.com>

In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Add the corresponding fix for the Rx burst path.
---
v3: Fix Rx/Tx burst routine selection.
---
 drivers/net/iavf/iavf.h      |  42 ++++++++-
 drivers/net/iavf/iavf_rxtx.c | 173 +++++++++++++++++++++++------------
 2 files changed, 157 insertions(+), 58 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..73a089c199 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,44 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+	IAVF_RX_DEFAULT,
+	IAVF_RX_FLEX_RXD,
+	IAVF_RX_BULK_ALLOC,
+	IAVF_RX_SCATTERED,
+	IAVF_RX_SCATTERED_FLEX_RXD,
+	IAVF_RX_SSE,
+	IAVF_RX_AVX2,
+	IAVF_RX_AVX2_OFFLOAD,
+	IAVF_RX_SSE_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512,
+	IAVF_RX_AVX512_OFFLOAD,
+	IAVF_RX_AVX512_FLEX_RXD,
+	IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED,
+	IAVF_RX_AVX512_SCATTERED_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+	IAVF_TX_DEFAULT,
+	IAVF_TX_SSE,
+	IAVF_TX_AVX2,
+	IAVF_TX_AVX2_OFFLOAD,
+	IAVF_TX_AVX512,
+	IAVF_TX_AVX512_OFFLOAD,
+	IAVF_TX_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_rx_burst_type rx_burst_type;
+	enum iavf_tx_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..13b932ad85 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3707,15 +3707,68 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static
+const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
+	iavf_recv_pkts,
+	iavf_recv_pkts_flex_rxd,
+	iavf_recv_pkts_bulk_alloc,
+	iavf_recv_scattered_pkts,
+	iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+	iavf_recv_pkts_vec,
+	iavf_recv_pkts_vec_avx2,
+	iavf_recv_pkts_vec_avx2_offload,
+	iavf_recv_pkts_vec_flex_rxd,
+	iavf_recv_pkts_vec_avx2_flex_rxd,
+	iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+	iavf_recv_scattered_pkts_vec,
+	iavf_recv_scattered_pkts_vec_avx2,
+	iavf_recv_scattered_pkts_vec_avx2_offload,
+	iavf_recv_scattered_pkts_vec_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+	iavf_recv_pkts_vec_avx512,
+	iavf_recv_pkts_vec_avx512_offload,
+	iavf_recv_pkts_vec_avx512_flex_rxd,
+	iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+	iavf_recv_scattered_pkts_vec_avx512,
+	iavf_recv_scattered_pkts_vec_avx512_offload,
+	iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+	iavf_recv_pkts_vec,
+#endif
+};
+
+static
+const eth_tx_burst_t iavf_tx_pkt_burst_ops[] = {
+	iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	iavf_xmit_pkts_vec,
+	iavf_xmit_pkts_vec_avx2,
+	iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	iavf_xmit_pkts_vec_avx512,
+	iavf_xmit_pkts_vec_avx512_offload,
+	iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static uint16_t
 iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_rx_queue *rxq = rx_queue;
+	enum iavf_rx_burst_type rx_burst_type =
+		rxq->vsi->adapter->rx_burst_type;
+
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rx_pkt_burst_ops[rx_burst_type](rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3724,10 +3777,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3738,6 +3794,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	enum iavf_rx_burst_type rx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 	int i;
 	struct iavf_rx_queue *rxq;
@@ -3808,43 +3865,43 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_OFFLOAD;
 				}
 #endif
 			}
@@ -3874,51 +3931,46 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX512_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2;
+						rx_burst_type = IAVF_RX_AVX2;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_offload;
+						rx_burst_type = IAVF_RX_AVX2_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512;
+						rx_burst_type = IAVF_RX_AVX512;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_offload;
+						rx_burst_type = IAVF_RX_AVX512_OFFLOAD;
 				}
 #endif
 			}
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3934,11 +3986,13 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			rxq = dev->data->rx_queues[i];
 			(void)iavf_rxq_vec_setup(rxq);
 		}
-		dev->rx_pkt_burst = iavf_recv_pkts_vec;
+		rx_burst_type = IAVF_RX_SSE;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3947,25 +4001,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_SCATTERED_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+			rx_burst_type = IAVF_RX_SCATTERED;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+		rx_burst_type = IAVF_RX_BULK_ALLOC;
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_pkts;
+			rx_burst_type = IAVF_RX_DEFAULT;
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		adapter->rx_burst_type = rx_burst_type;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+	} else {
+		dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 	}
 }
 
@@ -3975,6 +4031,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4010,11 +4067,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_sse) {
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
-			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_TX_SSE;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_TX_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4022,7 +4079,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 					"AVX2 does not support outer checksum offload.");
 				goto normal;
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_TX_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4031,16 +4088,16 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_TX_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_TX_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_TX_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,8 +4120,10 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else {
+			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
 		return;
 	}
@@ -4073,12 +4132,14 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
-	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_TX_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else {
+		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
 }
 
-- 
2.25.1


  reply	other threads:[~2024-01-02 11:11 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-21 10:12 [PATCH] net/iavf: add diagnostic support in TX path Mingjin Ye
2023-12-21 12:00 ` Zhang, Qi Z
2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
2023-12-22 11:37   ` Zhang, Qi Z
2023-12-25  2:48     ` Ye, MingjinX
2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
2023-12-27 11:30       ` Zhang, Qi Z
2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
2023-12-28 10:50           ` Zhang, Qi Z
2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2023-12-31  6:41               ` Zhang, Qi Z
2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-02 10:52                 ` Mingjin Ye [this message]
2024-01-03  2:22                   ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Zhang, Qi Z
2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-03  2:54                   ` Zhang, Qi Z
2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
2024-01-09 10:09                             ` [PATCH v11] " Mingjin Ye
2024-01-10  2:25                             ` Mingjin Ye
2024-02-09 14:43                               ` Burakov, Anatoly
2024-02-09 15:20                               ` Burakov, Anatoly
2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
2024-02-29 18:38                                 ` Bruce Richardson
2024-03-04 12:34                                   ` Bruce Richardson
2024-01-05  0:44                       ` [PATCH v8 2/2] " Zhang, Qi Z
2023-12-29 10:11             ` [PATCH v6 " Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 " Mingjin Ye

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240102105211.788819-2-mingjinx.ye@intel.com \
    --to=mingjinx.ye@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=qiming.yang@intel.com \
    --cc=stable@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).