* [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-10 2:54 ` [PATCH v4] " Ke Zhang
@ 2022-05-12 2:21 ` Ke Zhang
2022-05-12 5:57 ` Ke Zhang
2022-05-12 7:44 ` Ke Zhang
2 siblings, 0 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-12 2:21 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 50 ++++++++++++++++++++-----
drivers/net/iavf/iavf_rxtx.h | 11 ++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 +---
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 ++------
4 files changed, 57 insertions(+), 28 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..c4f43c9d0b 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
+static const
+struct iavf_rxq_ops def_rxq_ops = {
.release_mbufs = release_rxq_mbufs,
};
-static const struct iavf_txq_ops def_txq_ops = {
+static const
+struct iavf_txq_ops def_txq_ops = {
.release_mbufs = release_txq_mbufs,
};
+static const
+struct iavf_rxq_ops sse_vec_rxq_ops = {
+ .release_mbufs = iavf_rx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops sse_vec_txq_ops = {
+ .release_mbufs = iavf_tx_queue_release_mbufs_sse,
+};
+
+static const
+struct iavf_txq_ops avx512_vec_txq_ops = {
+ .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+};
+
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT] = def_rxq_ops,
+ [IAVF_REL_MBUFS_SSE_VEC] = sse_vec_rxq_ops,
+};
+
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT] = def_txq_ops,
+ [IAVF_REL_MBUFS_SSE_VEC] = sse_vec_txq_ops,
+ [IAVF_REL_MBUFS_AVX512_VEC] = avx512_vec_txq_ops,
+};
+
static inline void
iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
struct rte_mbuf *mb,
@@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..665ca84762 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f8db1b152a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
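The change in this thread boils down to the following stand-alone sketch (a
minimal illustration only; the type and function names below are invented,
not the actual iavf definitions): the queue structure that lives in shared
memory stores a small index instead of a function pointer, and each process
resolves that index against its own statically linked table, so the callback
address is always valid in the calling process.

/* Minimal illustration only: names are invented, not the iavf driver's. */
#include <stdint.h>
#include <stdio.h>

struct queue {
	uint8_t rel_mbufs_type;   /* index, meaningful in every process */
	/* ... rings, sw_ring, etc. live here in shared memory ... */
};

enum rel_mbufs_type {
	REL_MBUFS_DEFAULT = 0,
	REL_MBUFS_SSE_VEC = 1,
};

static void release_mbufs_default(struct queue *q) { (void)q; puts("scalar release"); }
static void release_mbufs_sse(struct queue *q)     { (void)q; puts("vector release"); }

/* Each process gets its own copy of this table (it is not placed in shared
 * memory), so the stored addresses are always local to the caller. */
static const struct {
	void (*release_mbufs)(struct queue *q);
} rel_mbufs_ops[] = {
	[REL_MBUFS_DEFAULT] = { release_mbufs_default },
	[REL_MBUFS_SSE_VEC] = { release_mbufs_sse },
};

int main(void)
{
	struct queue q = { .rel_mbufs_type = REL_MBUFS_SSE_VEC };

	/* Dispatch through the local table rather than through a function
	 * pointer stored in the shared queue structure. */
	rel_mbufs_ops[q.rel_mbufs_type].release_mbufs(&q);
	return 0;
}

Because the table is built only from compile-time constants, it can also stay
const, which is the point raised later in the thread.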
* [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-10 2:54 ` [PATCH v4] " Ke Zhang
2022-05-12 2:21 ` [PATCH v5] " Ke Zhang
@ 2022-05-12 5:57 ` Ke Zhang
2022-05-12 17:26 ` Stephen Hemminger
2022-05-12 7:44 ` Ke Zhang
2 siblings, 1 reply; 19+ messages in thread
From: Ke Zhang @ 2022-05-12 5:57 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 29 +++++++++++++++----------
drivers/net/iavf/iavf_rxtx.h | 11 ++++++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++-----
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 ++++----------
4 files changed, 34 insertions(+), 30 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a4a6fcee10 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,17 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
};
static inline void
@@ -674,7 +679,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +816,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +948,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +976,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +991,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1005,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1039,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1047,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..665ca84762 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..f8db1b152a 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-12 5:57 ` Ke Zhang
@ 2022-05-12 17:26 ` Stephen Hemminger
2022-05-13 1:34 ` Zhang, Ke1X
0 siblings, 1 reply; 19+ messages in thread
From: Stephen Hemminger @ 2022-05-12 17:26 UTC (permalink / raw)
To: Ke Zhang; +Cc: xiaoyun.li, jingjing.wu, beilei.xing, dev
On Thu, 12 May 2022 05:57:19 +0000
Ke Zhang <ke1x.zhang@intel.com> wrote:
>
> -static const struct iavf_rxq_ops def_rxq_ops = {
> - .release_mbufs = release_rxq_mbufs,
> +static
> +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
> };
>
> -static const struct iavf_txq_ops def_txq_ops = {
> - .release_mbufs = release_txq_mbufs,
> +static
> +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
> + [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
> };
Did you have to take const off of these?
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-12 17:26 ` Stephen Hemminger
@ 2022-05-13 1:34 ` Zhang, Ke1X
2022-05-13 1:57 ` Stephen Hemminger
0 siblings, 1 reply; 19+ messages in thread
From: Zhang, Ke1X @ 2022-05-13 1:34 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Friday, May 13, 2022 1:27 AM
> To: Zhang, Ke1X <ke1x.zhang@intel.com>
> Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Subject: Re: [PATCH v5] fix mbuf release function point corrupt in multi-
> process
>
> On Thu, 12 May 2022 05:57:19 +0000
> Ke Zhang <ke1x.zhang@intel.com> wrote:
>
> >
> > -static const struct iavf_rxq_ops def_rxq_ops = {
> > - .release_mbufs = release_rxq_mbufs,
> > +static
> > +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> > + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> > + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> iavf_rx_queue_release_mbufs_sse,
> > };
> >
> > -static const struct iavf_txq_ops def_txq_ops = {
> > - .release_mbufs = release_txq_mbufs,
> > +static
> > +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> > + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> > + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> iavf_tx_queue_release_mbufs_sse,
> > + [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs =
> iavf_tx_queue_release_mbufs_avx512,
> > };
>
> Did you have to take const off of these?
Thanks for your comments. I checked other code, such as the Linux kernel,
and found that const is not used on these function-pointer tables, for
example:
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
#ifdef CONFIG_PM
.driver.pm = &ice_pm_ops,
#endif /* CONFIG_PM */
.shutdown = ice_shutdown,
#ifndef STATIC_QOS_CFG_SUPPORT
.sriov_configure = ice_sriov_configure,
#endif /* !STATIC_QOS_CFG_SUPPORT */
#ifdef HAVE_RHEL7_PCI_DRIVER_RH
.pci_driver_rh = &ice_driver_rh,
#endif /* HAVE_RHEL7_PCI_DRIVER_RH */
.err_handler = &ice_pci_err_handler
};
So I did not add the const.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-13 1:34 ` Zhang, Ke1X
@ 2022-05-13 1:57 ` Stephen Hemminger
0 siblings, 0 replies; 19+ messages in thread
From: Stephen Hemminger @ 2022-05-13 1:57 UTC (permalink / raw)
To: Zhang, Ke1X; +Cc: Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
On Fri, 13 May 2022 01:34:02 +0000
"Zhang, Ke1X" <ke1x.zhang@intel.com> wrote:
> > -----Original Message-----
> > From: Stephen Hemminger <stephen@networkplumber.org>
> > Sent: Friday, May 13, 2022 1:27 AM
> > To: Zhang, Ke1X <ke1x.zhang@intel.com>
> > Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> > Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> > Subject: Re: [PATCH v5] fix mbuf release function point corrupt in multi-
> > process
> >
> > On Thu, 12 May 2022 05:57:19 +0000
> > Ke Zhang <ke1x.zhang@intel.com> wrote:
> >
> > >
> > > -static const struct iavf_rxq_ops def_rxq_ops = {
> > > - .release_mbufs = release_rxq_mbufs,
> > > +static
> > > +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> > > + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> > > + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> > iavf_rx_queue_release_mbufs_sse,
> > > };
> > >
> > > -static const struct iavf_txq_ops def_txq_ops = {
> > > - .release_mbufs = release_txq_mbufs,
> > > +static
> > > +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> > > + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> > > + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs =
> > iavf_tx_queue_release_mbufs_sse,
> > > + [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs =
> > iavf_tx_queue_release_mbufs_avx512,
> > > };
> >
> > Did you have to take const off of these?
>
> Thanks for your comments, I check the other code like linux kernel , I found there are no const for the function pointer, like:
>
> static struct pci_driver ice_driver = {
> .name = KBUILD_MODNAME,
> .id_table = ice_pci_tbl,
> .probe = ice_probe,
> .remove = ice_remove,
> #ifdef CONFIG_PM
> .driver.pm = &ice_pm_ops,
> #endif /* CONFIG_PM */
> .shutdown = ice_shutdown,
> #ifndef STATIC_QOS_CFG_SUPPORT
> .sriov_configure = ice_sriov_configure,
> #endif /* !STATIC_QOS_CFG_SUPPORT */
> #ifdef HAVE_RHEL7_PCI_DRIVER_RH
> .pci_driver_rh = &ice_driver_rh,
> #endif /* HAVE_RHEL7_PCI_DRIVER_RH */
> .err_handler = &ice_pci_err_handler
> };
>
> So I don't add the const.
>
This is not the kernel! The kernel pci_driver structure has other reasons
it can't be const: it gets linked into the bus's list of PCI drivers. The
kernel should be splitting the driver object (pci_driver) from its function
pointers by introducing a new pci_driver_ops, but that would require lots
of extra work; the kernel hardening project may get to it.
As a general rule, any table of function pointers should be const for
security reasons. DPDK has fewer security requirements than the kernel and
less security testing, but developers should still try to avoid such issues
where possible.
^ permalink raw reply [flat|nested] 19+ messages in thread
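Stephen's point can be shown with a short, self-contained example (the names
are made up for illustration, not DPDK code): a const-qualified table of
function pointers ends up in a read-only section, so the callback addresses
cannot be overwritten after the program is loaded, which is why the later
revisions of this patch keep the const qualifier on the ops arrays.

/* Illustration only; names invented. */
#include <stdio.h>

struct ops {
	void (*release)(void);
};

static void release_impl(void) { puts("release"); }

/* const places the table in a read-only section (.rodata); a stray or
 * malicious write to it faults instead of redirecting the callback. */
static const struct ops table[] = {
	{ .release = release_impl },
};

int main(void)
{
	table[0].release();
	/* table[0].release = NULL;   would not compile: assignment of
	 * read-only location */
	return 0;
}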
* [PATCH v5] fix mbuf release function point corrupt in multi-process
2022-05-10 2:54 ` [PATCH v4] " Ke Zhang
2022-05-12 2:21 ` [PATCH v5] " Ke Zhang
2022-05-12 5:57 ` Ke Zhang
@ 2022-05-12 7:44 ` Ke Zhang
2022-05-16 6:41 ` [PATCH v6] " Ke Zhang
2022-05-16 6:55 ` Ke Zhang
2 siblings, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-12 7:44 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 29 +++++++++++++++----------
drivers/net/iavf/iavf_rxtx.h | 11 ++++++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++-----
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 ++++----------
4 files changed, 34 insertions(+), 30 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a4a6fcee10 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,17 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
};
static inline void
@@ -674,7 +679,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +816,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +948,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +976,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +991,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1005,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1039,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1047,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void __rte_cold
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v6] fix mbuf release function point corrupt in multi-process
2022-05-12 7:44 ` Ke Zhang
@ 2022-05-16 6:41 ` Ke Zhang
2022-05-16 6:55 ` Ke Zhang
1 sibling, 0 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-16 6:41 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 35 ++++++++++++++++---------
drivers/net/iavf/iavf_rxtx.h | 11 ++++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++----
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 +++--------
4 files changed, 40 insertions(+), 30 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..adcc874171 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,23 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#endif
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
};
static inline void
@@ -674,7 +685,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +822,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +954,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +982,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +997,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1011,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1053,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void __rte_cold
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v6] fix mbuf release function point corrupt in multi-process
2022-05-12 7:44 ` Ke Zhang
2022-05-16 6:41 ` [PATCH v6] " Ke Zhang
@ 2022-05-16 6:55 ` Ke Zhang
2022-05-17 7:27 ` Zhang, Qi Z
2022-05-19 7:36 ` [PATCH v7] net/iavf: " Ke Zhang
1 sibling, 2 replies; 19+ messages in thread
From: Ke Zhang @ 2022-05-16 6:55 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 36 ++++++++++++++++---------
drivers/net/iavf/iavf_rxtx.h | 11 ++++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++----
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 +++--------
4 files changed, 41 insertions(+), 30 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..2f339b3703 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+
};
static inline void
@@ -674,7 +686,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +823,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +955,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +983,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +998,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1012,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1046,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1054,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..48cc0da6f5 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void __rte_cold
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v6] fix mbuf release function point corrupt in multi-process
2022-05-16 6:55 ` Ke Zhang
@ 2022-05-17 7:27 ` Zhang, Qi Z
2022-05-19 7:36 ` [PATCH v7] net/iavf: " Ke Zhang
1 sibling, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-05-17 7:27 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Monday, May 16, 2022 2:55 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH v6] fix mbuf release function point corrupt in multi-process
>
Please fix the title format
> In a multi-process environment, a secondary process operates on the
> shared memory and overwrites the function pointer set up by the primary
> process. When the primary process later releases the queue, that pointer
> no longer resolves to a valid function address in its own address space,
> which results in a crash.
Please add the Fixes line and Cc stable.
>
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-process
2022-05-16 6:55 ` Ke Zhang
2022-05-17 7:27 ` Zhang, Qi Z
@ 2022-05-19 7:36 ` Ke Zhang
2022-05-19 9:25 ` Zhang, Qi Z
1 sibling, 1 reply; 19+ messages in thread
From: Ke Zhang @ 2022-05-19 7:36 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang, stable
In a multi-process environment, a secondary process operates on the
shared memory and overwrites the function pointer set up by the primary
process. When the primary process later releases the queue, that pointer
no longer resolves to a valid function address in its own address space,
which results in a crash.
Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
Cc: stable@dpdk.org
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 36 ++++++++++++++++---------
drivers/net/iavf/iavf_rxtx.h | 11 ++++++++
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++----
drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 +++--------
4 files changed, 41 insertions(+), 30 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 345f6aeebc..bf1adfccef 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,12 +362,24 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+
};
static inline void
@@ -678,7 +690,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -815,7 +827,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -947,7 +959,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -975,7 +987,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -990,7 +1002,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1004,7 +1016,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1038,7 +1050,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1046,7 +1058,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 642b9a700a..e8362bbd1d 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -190,6 +190,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -249,6 +250,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -392,6 +394,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -695,6 +703,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..3bfec63851 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void __rte_cold
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..4a5232c1d2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-process
2022-05-19 7:36 ` [PATCH v7] net/iavf: " Ke Zhang
@ 2022-05-19 9:25 ` Zhang, Qi Z
0 siblings, 0 replies; 19+ messages in thread
From: Zhang, Qi Z @ 2022-05-19 9:25 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
Cc: Zhang, Ke1X, stable
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Thursday, May 19, 2022 3:36 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH v7] net/iavf: fix mbuf release function point corrupt in multi-
> process
>
> In a multi-process environment, a secondary process operates on the
> shared memory and overwrites the function pointer set up by the primary
> process. When the primary process later releases the queue, that pointer
> no longer resolves to a valid function address in its own address space,
> which results in a crash.
>
> Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
^ permalink raw reply [flat|nested] 19+ messages in thread