DPDK patches and discussions
* [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
@ 2022-04-02  9:51 Ke Zhang
  2022-04-03  2:07 ` Zhang, Qi Z
  2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
  0 siblings, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-04-02  9:51 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary
process operates on the shared memory and changes
the function pointer of the primary process, so
the function address cannot be resolved when the
primary process releases its queues, resulting in
a crash.

Similar to commit <20b631efe785819eb77aabbf500b3352e5731bdb>.
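
A minimal sketch of the failure mode (illustrative types and names,
not the driver's exact structures):

struct rxq_ops {
        void (*release_mbufs)(void *rxq);
};

struct rx_queue {                       /* lives in shared hugepage memory */
        const struct rxq_ops *ops;      /* a process-local code address */
        /* ... */
};

/* Secondary process: stores an address that is only valid in its own
 * address space into the shared structure.
 */
static void vec_setup_in_secondary(struct rx_queue *rxq,
                                   const struct rxq_ops *local_ops)
{
        rxq->ops = local_ops;
}

/* Primary process: on dev_stop/queue release it jumps through a pointer
 * that need not map to the same code in this process, and crashes.
 */
static void release_in_primary(struct rx_queue *rxq)
{
        rxq->ops->release_mbufs(rxq);
}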

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 27 ++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h            |  6 +++---
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  4 ++--
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  8 ++++----
 4 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..197c03cd31 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
+const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops;
+const struct iavf_txq_ops *iavf_txq_release_mbufs_ops;
+
 static const struct iavf_rxq_ops def_rxq_ops = {
 	.release_mbufs = release_rxq_mbufs,
 };
@@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	iavf_rxq_release_mbufs_ops = &def_rxq_ops;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	iavf_txq_release_mbufs_ops = &def_txq_ops;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +946,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +974,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops->release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +989,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops->release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1003,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops->release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1037,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops->release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -2825,7 +2828,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
 			rxq = dev->data->rx_queues[i];
-			(void)iavf_rxq_vec_setup(rxq);
+			(void)iavf_rxq_vec_setup(rxq, &iavf_rxq_release_mbufs_ops);
 		}
 
 		if (dev->data->scattered_rx) {
@@ -3008,11 +3011,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 				continue;
 #ifdef CC_AVX512_SUPPORT
 			if (use_avx512)
-				iavf_txq_vec_setup_avx512(txq);
+				iavf_txq_vec_setup_avx512(&iavf_txq_release_mbufs_ops);
 			else
-				iavf_txq_vec_setup(txq);
+				iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
 #else
-			iavf_txq_vec_setup(txq);
+			iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
 #endif
 		}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..7df501d784 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -657,8 +657,8 @@ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
-int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops);
+int iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops);
 uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -687,7 +687,7 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
 					   struct rte_mbuf **tx_pkts,
 					   uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops);
 
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..08de34c87c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2017,9 +2017,9 @@ static const struct iavf_txq_ops avx512_vec_txq_ops = {
 };
 
 int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	*txq_ops = &avx512_vec_txq_ops;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..a782bed2e0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
 };
 
 int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
 {
-	txq->ops = &sse_vec_txq_ops;
+	*txq_ops = &sse_vec_txq_ops;
 	return 0;
 }
 
 int __rte_cold
-iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	*rxq_ops = &sse_vec_rxq_ops;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1



* RE: [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
  2022-04-02  9:51 [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
@ 2022-04-03  2:07 ` Zhang, Qi Z
  2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
  1 sibling, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-04-03  2:07 UTC (permalink / raw)
  To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X



> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Saturday, April 2, 2022 5:51 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH] net/iavf: fix iavf crashed on dev_stop when running in
> multi-process mode
> 
> In a multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process, so
> the function address cannot be resolved when the primary process releases
> its queues, resulting in a crash.
> 
> Similar to commit <20b631efe785819eb77aabbf500b3352e5731bdb>.
> 
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c            | 27 ++++++++++++++-----------
>  drivers/net/iavf/iavf_rxtx.h            |  6 +++---
>  drivers/net/iavf/iavf_rxtx_vec_avx512.c |  4 ++--
>  drivers/net/iavf/iavf_rxtx_vec_sse.c    |  8 ++++----
>  4 files changed, 24 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 16e8d021f9..197c03cd31 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
>  	}
>  }
> 
> +const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops; const struct
> +iavf_txq_ops *iavf_txq_release_mbufs_ops;


> +
>  static const struct iavf_rxq_ops def_rxq_ops = {
>  	.release_mbufs = release_rxq_mbufs,
>  };
> @@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
>  	rxq->q_set = true;
>  	dev->data->rx_queues[queue_idx] = rxq;
>  	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
> -	rxq->ops = &def_rxq_ops;
> +	iavf_rxq_release_mbufs_ops = &def_rxq_ops;

This is not correct.
Now we replace the per-queue ops with global ops, which is not expected.

Please reference the method of the patch below.

commit 0ed16e01313e1f8930dc6a52b22159b20269d4e0
Author: Steve Yang <stevex.yang@intel.com>
Date:   Mon Feb 28 09:48:59 2022 +0000

    net/iavf: fix function pointer in multi-process

...


> 
> diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> index 717a227b2c..a782bed2e0 100644
> --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> @@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops
> = {  };
> 
>  int __rte_cold
> -iavf_txq_vec_setup(struct iavf_tx_queue *txq)
> +iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
>  {
> -	txq->ops = &sse_vec_txq_ops;
> +	*txq_ops = &sse_vec_txq_ops;
>  	return 0;
>  }
> 
>  int __rte_cold
> -iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
> +iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops
> +**rxq_ops)
>  {
> -	rxq->ops = &sse_vec_rxq_ops;
> +	*rxq_ops = &sse_vec_rxq_ops;
>  	return iavf_rxq_vec_setup_default(rxq);  }

There seems to be a lot of redundancy in iavf_rxtx_vec_sse.c.

Can we move iavf_r(t)xq_vec_setup / sse_vec_r(t)xq_ops into iavf_rxtx.c and delete iavf_r(t)x_queue_release_mbufs_sse?

Btw, we can keep this patch unchanged; a separate patch to refactor the code is expected.
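
For reference, a rough sketch of the general idea (which the later
revisions of this patch also adopt for release_mbufs): keep only a small
index in the shared queue structure and resolve it through a per-process
const table, so each process calls function addresses from its own
address space. Names below are illustrative:

#include <stdint.h>

struct iavf_rx_queue;                   /* queue data lives in shared memory */

struct rxq_mbufs_ops {
        void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

enum rel_mbufs_type {
        REL_MBUFS_DEFAULT,
        REL_MBUFS_SSE,
};

/* Stubs standing in for the scalar and vector release paths. */
static void release_rxq_mbufs(struct iavf_rx_queue *rxq) { (void)rxq; }
static void release_rxq_mbufs_sse(struct iavf_rx_queue *rxq) { (void)rxq; }

/* Per-process constant: every process gets its own copy of this table,
 * so the stored function addresses are always valid locally.
 */
static const struct rxq_mbufs_ops rel_mbufs_ops[] = {
        [REL_MBUFS_DEFAULT] = { .release_mbufs = release_rxq_mbufs },
        [REL_MBUFS_SSE]     = { .release_mbufs = release_rxq_mbufs_sse },
};

/* Only the small integer index is kept in the shared queue structure
 * (rel_mbufs_type in the later revisions), so any process can release:
 */
static void rxq_release_mbufs(struct iavf_rx_queue *rxq, uint8_t rel_mbufs_type)
{
        rel_mbufs_ops[rel_mbufs_type].release_mbufs(rxq);
}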


> 
> --
> 2.25.1



* [PATCH v2] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
  2022-04-02  9:51 [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
  2022-04-03  2:07 ` Zhang, Qi Z
@ 2022-04-14  9:29 ` Ke Zhang
  2022-04-18  6:41   ` Zhang, Qi Z
                     ` (2 more replies)
  1 sibling, 3 replies; 19+ messages in thread
From: Ke Zhang @ 2022-04-14  9:29 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary
process operates on the shared memory and changes
the function pointer of the primary process, so
the function address cannot be resolved when the
primary process releases its queues, resulting in
a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..1cef985fcc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2822,12 +2822,12 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		if (vf->vf_res->vf_cap_flags &
 			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
 			use_flex = true;
-
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			(void)iavf_rxq_vec_setup(rxq);
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				(void)iavf_rxq_vec_setup(rxq);
+			}
 		}
-
 		if (dev->data->scattered_rx) {
 			if (!use_avx512) {
 				PMD_DRV_LOG(DEBUG,
@@ -3002,20 +3002,21 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 #endif
 
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			txq = dev->data->tx_queues[i];
-			if (!txq)
-				continue;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				txq = dev->data->tx_queues[i];
+				if (!txq)
+					continue;
 #ifdef CC_AVX512_SUPPORT
-			if (use_avx512)
-				iavf_txq_vec_setup_avx512(txq);
-			else
-				iavf_txq_vec_setup(txq);
+				if (use_avx512)
+					iavf_txq_vec_setup_avx512(txq);
+				else
+					iavf_txq_vec_setup(txq);
 #else
-			iavf_txq_vec_setup(txq);
+				iavf_txq_vec_setup(txq);
 #endif
+			}
 		}
-
 		return;
 	}
 
-- 
2.25.1



* RE: [PATCH v2] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
  2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
@ 2022-04-18  6:41   ` Zhang, Qi Z
  2022-05-07  6:24   ` [PATCH v3] fix mbuf release function point corrupt in multi-process Ke Zhang
  2022-05-09  1:16   ` Ke Zhang
  2 siblings, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-04-18  6:41 UTC (permalink / raw)
  To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X



> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Thursday, April 14, 2022 5:29 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH v2] net/iavf: fix iavf crashed on dev_stop when running in
> multi-process mode

It's not good practice to use the failing test case as the title; please describe what the patch actually fixes.

e.g.:
fix mbuf release function point corrupt in multi-process

> 
> In a multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process, so
> the function address cannot be resolved when the primary process releases
> its queues, resulting in a crash.
> 
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c | 31 ++++++++++++++++---------------
>  1 file changed, 16 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 16e8d021f9..1cef985fcc 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2822,12 +2822,12 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
>  		if (vf->vf_res->vf_cap_flags &
>  			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
>  			use_flex = true;
> -
> -		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -			rxq = dev->data->rx_queues[i];
> -			(void)iavf_rxq_vec_setup(rxq);
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +				rxq = dev->data->rx_queues[i];
> +				(void)iavf_rxq_vec_setup(rxq);
> +			}
>  		}

Now you force rxq->ops to be set only by the primary process, which is not necessary.
It's better if we still allow a secondary process to stop a queue (which will call rxq->ops).
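
A compact sketch of the concern, with illustrative names (a
simplification, not the exact driver code): the process-type gate only
controls who writes the shared pointer, while any process may still
read and call it when stopping or releasing a queue:

#include <rte_eal.h>

struct rxq_ops {
        void (*release_mbufs)(void *rxq);
};

struct rx_queue {                       /* shared between processes */
        const struct rxq_ops *ops;
};

static void set_rx_function(struct rx_queue *rxq, const struct rxq_ops *vec_ops)
{
        /* v2: only the primary overwrites the shared pointer ... */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rxq->ops = vec_ops;
}

static void rx_queue_stop(struct rx_queue *rxq)
{
        /* ... but this may run in a secondary process, which then calls
         * through an address written by the primary; the raw pointer in
         * shared memory is still the underlying problem.
         */
        rxq->ops->release_mbufs(rxq);
}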



* [PATCH v3] fix mbuf release function point corrupt in multi-process
  2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
  2022-04-18  6:41   ` Zhang, Qi Z
@ 2022-05-07  6:24   ` Ke Zhang
  2022-05-09  1:16   ` Ke Zhang
  2 siblings, 0 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-07  6:24 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary process operates on the
shared memory and changes the function pointer of the primary process,
so the function address cannot be resolved when the primary process
releases its queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 50 +++++++++++++------------
 drivers/net/iavf/iavf_rxtx.h            |  6 +--
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  4 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  8 ++--
 4 files changed, 35 insertions(+), 33 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 1cef985fcc..56d4dbf2a4 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
+const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops[RTE_MAX_QUEUES_PER_PORT];
+const struct iavf_txq_ops *iavf_txq_release_mbufs_ops[RTE_MAX_QUEUES_PER_PORT];
+
 static const struct iavf_rxq_ops def_rxq_ops = {
 	.release_mbufs = release_rxq_mbufs,
 };
@@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	iavf_rxq_release_mbufs_ops[queue_idx] = &def_rxq_ops;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	iavf_txq_release_mbufs_ops[queue_idx] = &def_txq_ops;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +946,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rx_queue_id]->release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +974,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[tx_queue_id]->release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +989,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[qid]->release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1003,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[qid]->release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1037,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[i]->release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[i]->release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -2822,12 +2825,12 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		if (vf->vf_res->vf_cap_flags &
 			VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
 			use_flex = true;
-		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				rxq = dev->data->rx_queues[i];
-				(void)iavf_rxq_vec_setup(rxq);
-			}
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			(void)iavf_rxq_vec_setup(rxq, &iavf_rxq_release_mbufs_ops[i]);
 		}
+
 		if (dev->data->scattered_rx) {
 			if (!use_avx512) {
 				PMD_DRV_LOG(DEBUG,
@@ -3002,21 +3005,20 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 #endif
 
-		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-			for (i = 0; i < dev->data->nb_tx_queues; i++) {
-				txq = dev->data->tx_queues[i];
-				if (!txq)
-					continue;
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			if (!txq)
+				continue;
 #ifdef CC_AVX512_SUPPORT
-				if (use_avx512)
-					iavf_txq_vec_setup_avx512(txq);
-				else
-					iavf_txq_vec_setup(txq);
+			if (use_avx512)
+				iavf_txq_vec_setup_avx512(&iavf_txq_release_mbufs_ops[i]);
+			else
+				iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops[i]);
 #else
-				iavf_txq_vec_setup(txq);
+			iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops[i]);
 #endif
-			}
 		}
+
 		return;
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..7df501d784 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -657,8 +657,8 @@ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
-int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops);
+int iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops);
 uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -687,7 +687,7 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
 					   struct rte_mbuf **tx_pkts,
 					   uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops);
 
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..08de34c87c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2017,9 +2017,9 @@ static const struct iavf_txq_ops avx512_vec_txq_ops = {
 };
 
 int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	*txq_ops = &avx512_vec_txq_ops;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..a782bed2e0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
 };
 
 int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
 {
-	txq->ops = &sse_vec_txq_ops;
+	*txq_ops = &sse_vec_txq_ops;
 	return 0;
 }
 
 int __rte_cold
-iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	*rxq_ops = &sse_vec_rxq_ops;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1



* [PATCH v3] fix mbuf release function point corrupt in multi-process
  2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
  2022-04-18  6:41   ` Zhang, Qi Z
  2022-05-07  6:24   ` [PATCH v3] fix mbuf release function point corrupt in multi-process Ke Zhang
@ 2022-05-09  1:16   ` Ke Zhang
  2022-05-09  1:35     ` Zhang, Qi Z
  2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
  2 siblings, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-09  1:16 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary process operates on the
shared memory and changes the function pointer of the primary process,
so the function address cannot be resolved when the primary process
releases its queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 50 ++++++++++++++++++++-----
 drivers/net/iavf/iavf_rxtx.h            | 12 ++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 +---
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++------
 4 files changed, 58 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..8d7f3c4316 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
+static const
+struct iavf_rxq_ops def_rxq_ops = {
 	.release_mbufs = release_rxq_mbufs,
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
+static const
+struct iavf_txq_ops def_txq_ops = {
 	.release_mbufs = release_txq_mbufs,
 };
 
+static const
+struct iavf_rxq_ops sse_vec_rxq_ops = {
+	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops sse_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops avx512_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+};
+
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1] = {
+	[IAVF_REL_MBUFS_NORMAL] = def_rxq_ops,
+	[IAVF_REL_MBUFS_VEC] = sse_vec_rxq_ops,
+};
+
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1] = {
+	[IAVF_REL_MBUFS_NORMAL] = def_txq_ops,
+	[IAVF_REL_MBUFS_VEC_AVX512] = avx512_vec_txq_ops,
+	[IAVF_REL_MBUFS_VEC] = sse_vec_txq_ops,
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..826f59da39 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,13 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_NORMAL	= 0,
+	IAVF_REL_MBUFS_VEC_AVX512	= 1,
+	IAVF_REL_MBUFS_VEC		= 2,
+	IAVF_REL_MBUFS_LAST		= 63,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +701,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3636f27527 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_VEC_AVX512;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f7fda6abd7 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1



* RE: [PATCH v3] fix mbuf release function point corrupt in multi-process
  2022-05-09  1:16   ` Ke Zhang
@ 2022-05-09  1:35     ` Zhang, Qi Z
  2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
  1 sibling, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-05-09  1:35 UTC (permalink / raw)
  To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X



> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Monday, May 9, 2022 9:16 AM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH v3] fix mbuf release function point corrupt in multi-process
> 
> In a multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process, so
> the function address cannot be resolved when the primary process releases
> its queues, resulting in a crash.
> 
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c            | 50 ++++++++++++++++++++-----
>  drivers/net/iavf/iavf_rxtx.h            | 12 ++++++
>  drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 +---
>  drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++------
>  4 files changed, 58 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 16e8d021f9..8d7f3c4316 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
>  	}
>  }
> 
> -static const struct iavf_rxq_ops def_rxq_ops = {
> +static const
> +struct iavf_rxq_ops def_rxq_ops = {
>  	.release_mbufs = release_rxq_mbufs,
>  };
> 
> -static const struct iavf_txq_ops def_txq_ops = {
> +static const
> +struct iavf_txq_ops def_txq_ops = {
>  	.release_mbufs = release_txq_mbufs,
>  };
> 
> +static const
> +struct iavf_rxq_ops sse_vec_rxq_ops = {
> +	.release_mbufs = iavf_rx_queue_release_mbufs_sse, };
> +
> +static const
> +struct iavf_txq_ops sse_vec_txq_ops = {
> +	.release_mbufs = iavf_tx_queue_release_mbufs_sse, };
> +
> +static const
> +struct iavf_txq_ops avx512_vec_txq_ops = {
> +	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
> +};
> +
> +static const
> +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1]
> = {
> +	[IAVF_REL_MBUFS_NORMAL] = def_rxq_ops,
> +	[IAVF_REL_MBUFS_VEC] = sse_vec_rxq_ops, };

Please name the macros to align with the ops names: replace NORMAL with DEFAULT and VEC with SSE.

> +
> +static const
> +struct iavf_txq_ops iavf_txq_release_mbufs_ops[IAVF_REL_MBUFS_LAST + 1]

> = {
> +	[IAVF_REL_MBUFS_NORMAL] = def_txq_ops,
> +	[IAVF_REL_MBUFS_VEC_AVX512] = avx512_vec_txq_ops,
> +	[IAVF_REL_MBUFS_VEC] = sse_vec_txq_ops, };


Please re-order it to align with Rx: default -> sse -> avx512.

> +
>  static inline void
>  iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue
> *rxq,
>  				    struct rte_mbuf *mb,
> @@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
>  	rxq->q_set = true;
>  	dev->data->rx_queues[queue_idx] = rxq;
>  	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
> -	rxq->ops = &def_rxq_ops;
> +	rxq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
> 
>  	if (check_rx_bulk_allow(rxq) == true) {
>  		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are
> "
> @@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  	txq->q_set = true;
>  	dev->data->tx_queues[queue_idx] = txq;
>  	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
> -	txq->ops = &def_txq_ops;
> +	txq->rel_mbufs_type = IAVF_REL_MBUFS_NORMAL;
> 
>  	if (check_tx_vec_allow(txq) == false) {
>  		struct iavf_adapter *ad =
> @@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev,
> uint16_t rx_queue_id)
>  	}
> 
>  	rxq = dev->data->rx_queues[rx_queue_id];
> -	rxq->ops->release_mbufs(rxq);
> +	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
>  	reset_rx_queue(rxq);
>  	dev->data->rx_queue_state[rx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> 
> @@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev,
> uint16_t tx_queue_id)
>  	}
> 
>  	txq = dev->data->tx_queues[tx_queue_id];
> -	txq->ops->release_mbufs(txq);
> +	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
>  	reset_tx_queue(txq);
>  	dev->data->tx_queue_state[tx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> 
> @@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev,
> uint16_t qid)
>  	if (!q)
>  		return;
> 
> -	q->ops->release_mbufs(q);
> +	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
>  	rte_free(q->sw_ring);
>  	rte_memzone_free(q->mz);
>  	rte_free(q);
> @@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev,
> uint16_t qid)
>  	if (!q)
>  		return;
> 
> -	q->ops->release_mbufs(q);
> +	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
>  	rte_free(q->sw_ring);
>  	rte_memzone_free(q->mz);
>  	rte_free(q);
> @@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
>  		txq = dev->data->tx_queues[i];
>  		if (!txq)
>  			continue;
> -		txq->ops->release_mbufs(txq);
> +		iavf_txq_release_mbufs_ops[txq-
> >rel_mbufs_type].release_mbufs(txq);
>  		reset_tx_queue(txq);
>  		dev->data->tx_queue_state[i] =
> RTE_ETH_QUEUE_STATE_STOPPED;
>  	}
> @@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
>  		rxq = dev->data->rx_queues[i];
>  		if (!rxq)
>  			continue;
> -		rxq->ops->release_mbufs(rxq);
> +		iavf_rxq_release_mbufs_ops[rxq-
> >rel_mbufs_type].release_mbufs(rxq);
>  		reset_rx_queue(rxq);
>  		dev->data->rx_queue_state[i] =
> RTE_ETH_QUEUE_STATE_STOPPED;
>  	}
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index
> bf8aebbce8..826f59da39 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -187,6 +187,7 @@ struct iavf_rx_queue {
>  	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
>  	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
>  	uint8_t rxdid;
> +	uint8_t rel_mbufs_type;
> 
>  	/* used for VPMD */
>  	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
> @@ -246,6 +247,7 @@ struct iavf_tx_queue {
>  	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
>  	uint16_t free_thresh;
>  	uint16_t rs_thresh;
> +	uint8_t rel_mbufs_type;
> 
>  	uint16_t port_id;
>  	uint16_t queue_id;
> @@ -389,6 +391,13 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
>  	__le32 ipsec_said;
>  };
> 
> +enum iavf_rxtx_rel_mbufs_type {
> +	IAVF_REL_MBUFS_NORMAL	= 0,
> +	IAVF_REL_MBUFS_VEC_AVX512	= 1,
> +	IAVF_REL_MBUFS_VEC		= 2,
> +	IAVF_REL_MBUFS_LAST		= 63,

IAVF_REL_MBUFS_LAST is not necessary.
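
A small illustration of why the sentinel is not needed: with designated
initializers the array bound can be left empty and is derived from the
highest index used (illustrative names):

struct txq_ops {
        void (*release_mbufs)(void *txq);
};

static void release_txq_mbufs(void *txq) { (void)txq; }
static void release_txq_mbufs_sse(void *txq) { (void)txq; }
static void release_txq_mbufs_avx512(void *txq) { (void)txq; }

enum rel_mbufs_type {
        REL_MBUFS_DEFAULT,
        REL_MBUFS_SSE,
        REL_MBUFS_AVX512,
};

/* No [LAST + 1] bound required; sizeof(ops)/sizeof(ops[0]) == 3 here. */
static const struct txq_ops ops[] = {
        [REL_MBUFS_DEFAULT] = { .release_mbufs = release_txq_mbufs },
        [REL_MBUFS_SSE]     = { .release_mbufs = release_txq_mbufs_sse },
        [REL_MBUFS_AVX512]  = { .release_mbufs = release_txq_mbufs_avx512 },
};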




* [PATCH v4] fix mbuf release function point corrupt in multi-process
  2022-05-09  1:16   ` Ke Zhang
  2022-05-09  1:35     ` Zhang, Qi Z
@ 2022-05-10  2:54     ` Ke Zhang
  2022-05-12  2:21       ` [PATCH v5] " Ke Zhang
                         ` (2 more replies)
  1 sibling, 3 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-10  2:54 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary process operates on the
shared memory and changes the function pointer of the primary process,
so the function address cannot be resolved when the primary process
releases its queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 50 ++++++++++++++++++++-----
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 +---
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++------
 4 files changed, 57 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..67d00ee5bc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
+static const
+struct iavf_rxq_ops def_rxq_ops = {
 	.release_mbufs = release_rxq_mbufs,
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
+static const
+struct iavf_txq_ops def_txq_ops = {
 	.release_mbufs = release_txq_mbufs,
 };
 
+static const
+struct iavf_rxq_ops sse_vec_rxq_ops = {
+	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops sse_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops avx512_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+};
+
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT] = def_rxq_ops,
+	[IAVF_REL_MBUFS_SSE_VEC] = sse_vec_rxq_ops,
+};
+
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT] = def_txq_ops,
+	[IAVF_REL_MBUFS_SSE_VEC] = sse_vec_txq_ops,
+	[IAVF_REL_MBUFS_AVX512_VEC] = avx512_vec_txq_ops,
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..665ca84762 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f8db1b152a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1



* [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
@ 2022-05-12  2:21       ` Ke Zhang
  2022-05-12  5:57       ` Ke Zhang
  2022-05-12  7:44       ` Ke Zhang
  2 siblings, 0 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-12  2:21 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In a multi-process environment, the secondary process operates on the
shared memory and changes the function pointer of the primary process,
so the function address cannot be resolved when the primary process
releases its queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 50 ++++++++++++++++++++-----
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 +---
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++------
 4 files changed, 57 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..c4f43c9d0b 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
+static const
+struct iavf_rxq_ops def_rxq_ops = {
 	.release_mbufs = release_rxq_mbufs,
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
+static const
+struct iavf_txq_ops def_txq_ops = {
 	.release_mbufs = release_txq_mbufs,
 };
 
+static const
+struct iavf_rxq_ops sse_vec_rxq_ops = {
+	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops sse_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops avx512_vec_txq_ops = {
+	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+};
+
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT] = def_rxq_ops,
+	[IAVF_REL_MBUFS_SSE_VEC] = sse_vec_rxq_ops,
+};
+
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT] = def_txq_ops,
+	[IAVF_REL_MBUFS_SSE_VEC] = sse_vec_txq_ops,
+	[IAVF_REL_MBUFS_AVX512_VEC] = avx512_vec_txq_ops,
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..665ca84762 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f8db1b152a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
  2022-05-12  2:21       ` [PATCH v5] " Ke Zhang
@ 2022-05-12  5:57       ` Ke Zhang
  2022-05-12 17:26         ` Stephen Hemminger
  2022-05-12  7:44       ` Ke Zhang
  2 siblings, 1 reply; 19+ messages in thread
From: Ke Zhang @ 2022-05-12  5:57 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In the multi-process environment, the secondary process operates on
the shared memory and changes the function pointer of the primary
process, so the pointer no longer resolves to a valid function address
when the primary process releases the queues, resulting in a crash.
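
For illustration, a minimal sketch of the failure mode and of the
index-based fix (the names below are hypothetical, not the driver's
code): a function pointer stored in shared queue memory is only valid
inside the process that wrote it, while a small index can be resolved
through a per-process, read-only table.

#include <stdint.h>
#include <stdio.h>

typedef void (*release_fn)(void *q);

/* Queue state lives in shared memory (e.g. a hugepage memzone). */
struct queue {
	release_fn release;   /* broken: only valid in the process that set it */
	uint8_t rel_mbufs;    /* fix: a small index is valid in every process */
};

static void release_default(void *q)
{
	(void)q;
	printf("default mbuf release\n");
}

/* Per-process dispatch table: each process resolves the function
 * address in its own address space, so nothing written by another
 * process is ever dereferenced. */
static const release_fn release_ops[] = {
	[0] = release_default,
};

int main(void)
{
	struct queue q = { .rel_mbufs = 0 };

	/* Instead of q.release(&q), look the handler up locally. */
	release_ops[q.rel_mbufs](&q);
	return 0;
}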

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 29 +++++++++++++++----------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++-----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++++----------
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a4a6fcee10 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,17 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
 };
 
 static inline void
@@ -674,7 +679,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +816,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +948,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +976,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +991,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1005,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1039,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1047,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..665ca84762 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f8db1b152a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
  2022-05-12  2:21       ` [PATCH v5] " Ke Zhang
  2022-05-12  5:57       ` Ke Zhang
@ 2022-05-12  7:44       ` Ke Zhang
  2022-05-16  6:41         ` [PATCH v6] " Ke Zhang
  2022-05-16  6:55         ` Ke Zhang
  2 siblings, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-12  7:44 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In the multi-process environment, the secondary process operates on
the shared memory and changes the function pointer of the primary
process, so the pointer no longer resolves to a valid function address
when the primary process releases the queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 29 +++++++++++++++----------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++-----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 ++++----------
 4 files changed, 34 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a4a6fcee10 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,17 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
 };
 
 static inline void
@@ -674,7 +679,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +816,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +948,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +976,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +991,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1005,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1039,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1047,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void __rte_cold
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-12  5:57       ` Ke Zhang
@ 2022-05-12 17:26         ` Stephen Hemminger
  2022-05-13  1:34           ` Zhang, Ke1X
  0 siblings, 1 reply; 19+ messages in thread
From: Stephen Hemminger @ 2022-05-12 17:26 UTC (permalink / raw)
  To: Ke Zhang; +Cc: xiaoyun.li, jingjing.wu, beilei.xing, dev

On Thu, 12 May 2022 05:57:19 +0000
Ke Zhang <ke1x.zhang@intel.com> wrote:

>  
> -static const struct iavf_rxq_ops def_rxq_ops = {
> -	.release_mbufs = release_rxq_mbufs,
> +static
> +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
>  };
>  
> -static const struct iavf_txq_ops def_txq_ops = {
> -	.release_mbufs = release_txq_mbufs,
> +static
> +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
> +	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
>  };

Did you have to take const off of these?

^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-12 17:26         ` Stephen Hemminger
@ 2022-05-13  1:34           ` Zhang, Ke1X
  2022-05-13  1:57             ` Stephen Hemminger
  0 siblings, 1 reply; 19+ messages in thread
From: Zhang, Ke1X @ 2022-05-13  1:34 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev


> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Friday, May 13, 2022 1:27 AM
> To: Zhang, Ke1X <ke1x.zhang@intel.com>
> Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Subject: Re: [PATCH v5] fix mbuf release function point corrupt in multi-
> process
> 
> On Thu, 12 May 2022 05:57:19 +0000
> Ke Zhang <ke1x.zhang@intel.com> wrote:
> 
> >
> > -static const struct iavf_rxq_ops def_rxq_ops = {
> > -	.release_mbufs = release_rxq_mbufs,
> > +static
> > +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> > +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> > +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> iavf_rx_queue_release_mbufs_sse,
> >  };
> >
> > -static const struct iavf_txq_ops def_txq_ops = {
> > -	.release_mbufs = release_txq_mbufs,
> > +static
> > +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> > +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> > +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> iavf_tx_queue_release_mbufs_sse,
> > +	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs =
> iavf_tx_queue_release_mbufs_avx512,
> >  };
> 
> Did you have to take const off of these?

Thanks for your comments. I checked other code, such as the Linux kernel, and found that const is not used on these function-pointer structures, for example:

static struct pci_driver ice_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ice_pci_tbl,
	.probe = ice_probe,
	.remove = ice_remove,
#ifdef CONFIG_PM
	.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
	.shutdown = ice_shutdown,
#ifndef STATIC_QOS_CFG_SUPPORT
	.sriov_configure = ice_sriov_configure,
#endif /* !STATIC_QOS_CFG_SUPPORT */
#ifdef HAVE_RHEL7_PCI_DRIVER_RH
	.pci_driver_rh = &ice_driver_rh,
#endif /* HAVE_RHEL7_PCI_DRIVER_RH */
	.err_handler = &ice_pci_err_handler
};

So I did not add the const.


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v5] fix mbuf release function point corrupt in multi-process
  2022-05-13  1:34           ` Zhang, Ke1X
@ 2022-05-13  1:57             ` Stephen Hemminger
  0 siblings, 0 replies; 19+ messages in thread
From: Stephen Hemminger @ 2022-05-13  1:57 UTC (permalink / raw)
  To: Zhang, Ke1X; +Cc: Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev

On Fri, 13 May 2022 01:34:02 +0000
"Zhang, Ke1X" <ke1x.zhang@intel.com> wrote:

> > -----Original Message-----
> > From: Stephen Hemminger <stephen@networkplumber.org>
> > Sent: Friday, May 13, 2022 1:27 AM
> > To: Zhang, Ke1X <ke1x.zhang@intel.com>
> > Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> > Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> > Subject: Re: [PATCH v5] fix mbuf release function point corrupt in multi-
> > process
> > 
> > On Thu, 12 May 2022 05:57:19 +0000
> > Ke Zhang <ke1x.zhang@intel.com> wrote:
> >   
> > >
> > > -static const struct iavf_rxq_ops def_rxq_ops = {
> > > -	.release_mbufs = release_rxq_mbufs,
> > > +static
> > > +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> > > +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> > > +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs =  
> > iavf_rx_queue_release_mbufs_sse,  
> > >  };
> > >
> > > -static const struct iavf_txq_ops def_txq_ops = {
> > > -	.release_mbufs = release_txq_mbufs,
> > > +static
> > > +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> > > +	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> > > +	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs =  
> > iavf_tx_queue_release_mbufs_sse,  
> > > +	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs =  
> > iavf_tx_queue_release_mbufs_avx512,  
> > >  };  
> > 
> > Did you have to take const off of these?  
> 
> Thanks for your comments, I check the other code like linux kernel , I found there are no const for the function pointer, like:
> 
> static struct pci_driver ice_driver = {
> 	.name = KBUILD_MODNAME,
> 	.id_table = ice_pci_tbl,
> 	.probe = ice_probe,
> 	.remove = ice_remove,
> #ifdef CONFIG_PM
> 	.driver.pm = &ice_pm_ops,
> #endif /* CONFIG_PM */
> 	.shutdown = ice_shutdown,
> #ifndef STATIC_QOS_CFG_SUPPORT
> 	.sriov_configure = ice_sriov_configure,
> #endif /* !STATIC_QOS_CFG_SUPPORT */
> #ifdef HAVE_RHEL7_PCI_DRIVER_RH
> 	.pci_driver_rh = &ice_driver_rh,
> #endif /* HAVE_RHEL7_PCI_DRIVER_RH */
> 	.err_handler = &ice_pci_err_handler
> };
> 
> So I don't add the const.
> 

This is not the kernel!  The kernel pci_driver has other reasons it
can't be const: the Linux kernel pci_driver structure gets linked into
the list of PCI devices. The kernel should be splitting the device
object (pci_driver) from the functions by introducing a new
pci_driver_ops, but that would require lots of extra work; the kernel
hardening project may get to it.

As a general rule, any table of function pointers should be const for
security reasons. DPDK has fewer security requirements than the kernel
and less security testing, but developers should still try to avoid
such issues where possible.
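
A minimal sketch of the difference (hypothetical names, not DPDK
code): a const table of function pointers is placed in read-only data,
so a stray or malicious write to an entry faults instead of silently
retargeting the call, and direct assignments are rejected at build
time.

#include <stddef.h>

typedef void (*release_fn)(void *q);

static void release_default(void *q) { (void)q; }

/* Writable: a bad write can silently redirect the dispatch target. */
static release_fn writable_ops[] = { release_default };

/* const: lives in .rodata, so the entries cannot be overwritten. */
static const release_fn readonly_ops[] = { release_default };

int main(void)
{
	writable_ops[0](NULL);
	readonly_ops[0](NULL);
	/* readonly_ops[0] = NULL;  -- rejected by the compiler */
	return 0;
}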



^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v6] fix mbuf release function point corrupt in multi-process
  2022-05-12  7:44       ` Ke Zhang
@ 2022-05-16  6:41         ` Ke Zhang
  2022-05-16  6:55         ` Ke Zhang
  1 sibling, 0 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-16  6:41 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In the multi-process environment, the secondary process operates on
the shared memory and changes the function pointer of the primary
process, so the pointer no longer resolves to a valid function address
when the primary process releases the queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 35 ++++++++++++++++---------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 +++--------
 4 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..adcc874171 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,23 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#endif
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
 };
 
 static inline void
@@ -674,7 +685,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +822,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +954,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +982,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +997,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1011,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1053,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void __rte_cold
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v6] fix mbuf release function point corrupt in multi-process
  2022-05-12  7:44       ` Ke Zhang
  2022-05-16  6:41         ` [PATCH v6] " Ke Zhang
@ 2022-05-16  6:55         ` Ke Zhang
  2022-05-17  7:27           ` Zhang, Qi Z
  2022-05-19  7:36           ` [PATCH v7] net/iavf: " Ke Zhang
  1 sibling, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-16  6:55 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang

In the multi-process environment, the secondary process operates on
the shared memory and changes the function pointer of the primary
process, so the pointer no longer resolves to a valid function address
when the primary process releases the queues, resulting in a crash.

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 36 ++++++++++++++++---------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 +++--------
 4 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..2f339b3703 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+
 };
 
 static inline void
@@ -674,7 +686,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +823,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -943,7 +955,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -971,7 +983,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -986,7 +998,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1000,7 +1012,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1034,7 +1046,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1042,7 +1054,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void __rte_cold
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH v6] fix mbuf release function point corrupt in multi-process
  2022-05-16  6:55         ` Ke Zhang
@ 2022-05-17  7:27           ` Zhang, Qi Z
  2022-05-19  7:36           ` [PATCH v7] net/iavf: " Ke Zhang
  1 sibling, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-05-17  7:27 UTC (permalink / raw)
  To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X



> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Monday, May 16, 2022 2:55 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH v6] fix mbuf release function point corrupt in multi-process
> 

Please fix the title format

> In the multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process,
> so the pointer no longer resolves to a valid function address when the
> primary process releases the queues, resulting in a crash.

Please add the Fixes: line and Cc stable.

> 
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-process
  2022-05-16  6:55         ` Ke Zhang
  2022-05-17  7:27           ` Zhang, Qi Z
@ 2022-05-19  7:36           ` Ke Zhang
  2022-05-19  9:25             ` Zhang, Qi Z
  1 sibling, 1 reply; 19+ messages in thread
From: Ke Zhang @ 2022-05-19  7:36 UTC (permalink / raw)
  To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang, stable

In the multi-process environment, the secondary process operates on
the shared memory and changes the function pointer of the primary
process, so the pointer no longer resolves to a valid function address
when the primary process releases the queues, resulting in a crash.

Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
Cc: stable@dpdk.org

Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c            | 36 ++++++++++++++++---------
 drivers/net/iavf/iavf_rxtx.h            | 11 ++++++++
 drivers/net/iavf/iavf_rxtx_vec_avx512.c |  8 ++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 16 +++--------
 4 files changed, 41 insertions(+), 30 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..bf1adfccef 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_rxq_ops def_rxq_ops = {
-	.release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
 };
 
-static const struct iavf_txq_ops def_txq_ops = {
-	.release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+
 };
 
 static inline void
@@ -678,7 +690,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
-	rxq->ops = &def_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_rx_bulk_allow(rxq) == true) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -815,7 +827,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->ops = &def_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -947,7 +959,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 
 	rxq = dev->data->rx_queues[rx_queue_id];
-	rxq->ops->release_mbufs(rxq);
+	iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 	reset_rx_queue(rxq);
 	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -975,7 +987,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	txq->ops->release_mbufs(txq);
+	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -990,7 +1002,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1004,7 +1016,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	q->ops->release_mbufs(q);
+	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1038,7 +1050,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		txq->ops->release_mbufs(txq);
+		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
@@ -1046,7 +1058,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		if (!rxq)
 			continue;
-		rxq->ops->release_mbufs(rxq);
+		iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
 		reset_rx_queue(rxq);
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 642b9a700a..e8362bbd1d 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -190,6 +190,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
 	uint8_t rxdid;
+	uint8_t rel_mbufs_type;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -249,6 +250,7 @@ struct iavf_tx_queue {
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
+	uint8_t rel_mbufs_type;
 
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -392,6 +394,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
 	__le32 ipsec_said;
 };
 
+enum iavf_rxtx_rel_mbufs_type {
+	IAVF_REL_MBUFS_DEFAULT		= 0,
+	IAVF_REL_MBUFS_SSE_VEC		= 1,
+	IAVF_REL_MBUFS_AVX512_VEC	= 2,
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -695,6 +703,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
 uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
 
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-static inline void
+void __rte_cold
 iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 {
 	unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	}
 }
 
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
 {
-	txq->ops = &avx512_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 {
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-static void __rte_cold
+void __rte_cold
 iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
 {
 	_iavf_tx_queue_release_mbufs_vec(txq);
 }
 
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
-	.release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
-	.release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
 int __rte_cold
 iavf_txq_vec_setup(struct iavf_tx_queue *txq)
 {
-	txq->ops = &sse_vec_txq_ops;
+	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return 0;
 }
 
 int __rte_cold
 iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
 {
-	rxq->ops = &sse_vec_rxq_ops;
+	rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
 	return iavf_rxq_vec_setup_default(rxq);
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* RE: [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-process
  2022-05-19  7:36           ` [PATCH v7] net/iavf: " Ke Zhang
@ 2022-05-19  9:25             ` Zhang, Qi Z
  0 siblings, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-05-19  9:25 UTC (permalink / raw)
  To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
  Cc: Zhang, Ke1X, stable



> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Thursday, May 19, 2022 3:36 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-
> process
> 
> In the multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process,
> so the pointer no longer resolves to a valid function address when the
> primary process releases the queues, resulting in a crash.
> 
> Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2022-05-19  9:25 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-02  9:51 [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
2022-04-03  2:07 ` Zhang, Qi Z
2022-04-14  9:29 ` [PATCH v2] " Ke Zhang
2022-04-18  6:41   ` Zhang, Qi Z
2022-05-07  6:24   ` [PATCH v3] fix mbuf release function point corrupt in multi-process Ke Zhang
2022-05-09  1:16   ` Ke Zhang
2022-05-09  1:35     ` Zhang, Qi Z
2022-05-10  2:54     ` [PATCH v4] " Ke Zhang
2022-05-12  2:21       ` [PATCH v5] " Ke Zhang
2022-05-12  5:57       ` Ke Zhang
2022-05-12 17:26         ` Stephen Hemminger
2022-05-13  1:34           ` Zhang, Ke1X
2022-05-13  1:57             ` Stephen Hemminger
2022-05-12  7:44       ` Ke Zhang
2022-05-16  6:41         ` [PATCH v6] " Ke Zhang
2022-05-16  6:55         ` Ke Zhang
2022-05-17  7:27           ` Zhang, Qi Z
2022-05-19  7:36           ` [PATCH v7] net/iavf: " Ke Zhang
2022-05-19  9:25             ` Zhang, Qi Z

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).