* [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1.
@ 2022-04-25 8:36 Ke Zhang
2022-05-19 9:29 ` [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
` (3 more replies)
0 siblings, 4 replies; 12+ messages in thread
From: Ke Zhang @ 2022-04-25 8:36 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
For the Rx queue interrupt setting, when the VF Rx interrupt
is disabled (INTENA=0), there are two ways to write back
descriptors to host memory:
1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 0: completed descriptors are posted to host
memory according to the internal descriptor cache policy
(in other words, when a full cache line is available for
write-back).
2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 1: completed descriptors also trigger the ITR,
and following ITR expiration, all leftover completed
descriptors are posted to host memory.
Change 1) to 2) to make sure the VF stays synchronized with the PF.
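A minimal sketch of the two register-write variants, using the macros
from iavf_dev_rx_queue_intr_disable() in the diff below:

/* 1) WB_ON_ITR=0: write-back waits until a full cache line of
 *    completed descriptors can be posted to host memory. */
IAVF_WRITE_REG(hw,
	       IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
	       0);
/* 2) WB_ON_ITR=1: completed descriptors are also posted on ITR
 *    expiration, so even a single completed descriptor reaches
 *    host memory without waiting for more to accumulate. */
IAVF_WRITE_REG(hw,
	       IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
	       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);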
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_ethdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..17c7720600 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1833,7 +1833,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
IAVF_WRITE_REG(hw,
IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
- 0);
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
IAVF_WRITE_FLUSH(hw);
return 0;
--
2.25.1
* [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
2022-04-25 8:36 [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1 Ke Zhang
@ 2022-05-19 9:29 ` Ke Zhang
2022-05-19 9:30 ` [PATCH v2] net/iavf: fix Rx queue interrupt setting Ke Zhang
` (2 subsequent siblings)
3 siblings, 0 replies; 12+ messages in thread
From: Ke Zhang @ 2022-05-19 9:29 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, the secondary process
operates on shared memory and changes the function
pointer of the primary process, so the function address
cannot be resolved when the primary process releases the
queues, which results in a crash.
Similar to commit 20b631efe785819eb77aabbf500b3352e5731bdb.
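A minimal sketch of the failure mode, with the queue structure
abbreviated (the real definitions live in iavf_rxtx.h):

/* The Rx queue lives in memory shared by all DPDK processes,
 * but a function address is only meaningful inside the process
 * that loaded it. */
struct iavf_rx_queue {
	const struct iavf_rxq_ops *ops;	/* process-local address */
	/* ... */
};

/* Secondary process, during its own setup path: */
rxq->ops = &sse_vec_rxq_ops;	/* valid only in the secondary */

/* Primary process, later on dev_stop()/queue release: */
rxq->ops->release_mbufs(rxq);	/* foreign pointer -> crash */

The fix below therefore moves the ops pointer out of the shared
queue structure and into a driver global, which each process holds
in its own address space.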
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 27 ++++++++++++++-----------
drivers/net/iavf/iavf_rxtx.h | 6 +++---
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_sse.c | 8 ++++----
4 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..197c03cd31 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
+const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops;
+const struct iavf_txq_ops *iavf_txq_release_mbufs_ops;
+
static const struct iavf_rxq_ops def_rxq_ops = {
.release_mbufs = release_rxq_mbufs,
};
@@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ iavf_rxq_release_mbufs_ops = &def_rxq_ops;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ iavf_txq_release_mbufs_ops = &def_txq_ops;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +946,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +974,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +989,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1003,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1037,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -2825,7 +2828,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- (void)iavf_rxq_vec_setup(rxq);
+ (void)iavf_rxq_vec_setup(rxq, &iavf_rxq_release_mbufs_ops);
}
if (dev->data->scattered_rx) {
@@ -3008,11 +3011,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
continue;
#ifdef CC_AVX512_SUPPORT
if (use_avx512)
- iavf_txq_vec_setup_avx512(txq);
+ iavf_txq_vec_setup_avx512(&iavf_txq_release_mbufs_ops);
else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#endif
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..7df501d784 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -657,8 +657,8 @@ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
-int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops);
+int iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -687,7 +687,7 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..08de34c87c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2017,9 +2017,9 @@ static const struct iavf_txq_ops avx512_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &avx512_vec_txq_ops;
+ *txq_ops = &avx512_vec_txq_ops;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..a782bed2e0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &sse_vec_txq_ops;
+ *txq_ops = &sse_vec_txq_ops;
return 0;
}
int __rte_cold
-iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops)
{
- rxq->ops = &sse_vec_rxq_ops;
+ *rxq_ops = &sse_vec_rxq_ops;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
* [PATCH v2] net/iavf: fix Rx queue interrupt setting
2022-04-25 8:36 [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1 Ke Zhang
2022-05-19 9:29 ` [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
@ 2022-05-19 9:30 ` Ke Zhang
2022-05-19 9:56 ` Zhang, Qi Z
2022-05-20 2:39 ` [PATCH v2 0/1] " Ke Zhang
2022-05-20 3:00 ` [PATCH v2] net/iavf: " Ke Zhang
3 siblings, 1 reply; 12+ messages in thread
From: Ke Zhang @ 2022-05-19 9:30 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang, stable
For the Rx queue interrupt setting, when the VF Rx interrupt
is disabled (INTENA=0), there are two ways to write back
descriptors to host memory:
1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 0: completed descriptors are posted to host
memory according to the internal descriptor cache policy
(in other words, when a full cache line is available for
write-back).
2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 1: completed descriptors also trigger the ITR,
and following ITR expiration, all leftover completed
descriptors are posted to host memory.
Change 1) to 2) to make sure the VF stays synchronized with the PF.
Fixes: d6bde6b5eae9 ("net/avf: enable Rx interrupt")
Cc: stable@dpdk.org
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_ethdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..17c7720600 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1833,7 +1833,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
IAVF_WRITE_REG(hw,
IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
- 0);
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
IAVF_WRITE_FLUSH(hw);
return 0;
--
2.25.1
* RE: [PATCH v2] net/iavf: fix Rx queue interrupt setting
2022-05-19 9:30 ` [PATCH v2] net/iavf: fix Rx queue interrupt setting Ke Zhang
@ 2022-05-19 9:56 ` Zhang, Qi Z
0 siblings, 0 replies; 12+ messages in thread
From: Zhang, Qi Z @ 2022-05-19 9:56 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
Cc: Zhang, Ke1X, stable
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Thursday, May 19, 2022 5:31 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH v2] net/iavf: fix Rx queue interrupt setting
>
> For the Rx queue interrupt setting, when the VF Rx interrupt is disabled
> (INTENA=0), there are two ways to write back descriptors to host memory:
>
> 1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control Register to 0:
> completed descriptors are posted to host memory according to the internal
> descriptor cache policy (in other words, when a full cache line is
> available for write-back).
>
> 2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control Register to 1:
> completed descriptors also trigger the ITR, and following ITR expiration,
> all leftover completed descriptors are posted to host memory.
>
> Change 1) to 2) to make sure the VF stays synchronized with the PF.
You only change 1) to 2) in iavf_dev_rx_queue_intr_disable.
Please add more explanation of what the issue is and how we fix it.
>
> Fixes: d6bde6b5eae9 ("net/avf: enable Rx interrupt")
> Cc: stable@dpdk.org
>
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
> ---
Please add the change log here.
> drivers/net/iavf/iavf_ethdev.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index
> d6190ac24a..17c7720600 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -1833,7 +1833,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev
> *dev, uint16_t queue_id)
>
> IAVF_WRITE_REG(hw,
> IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
> - 0);
> + IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
>
> IAVF_WRITE_FLUSH(hw);
> return 0;
> --
> 2.25.1
* [PATCH v2 0/1] fix Rx queue interrupt setting
2022-04-25 8:36 [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1 Ke Zhang
2022-05-19 9:29 ` [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
2022-05-19 9:30 ` [PATCH v2] net/iavf: fix Rx queue interrupt setting Ke Zhang
@ 2022-05-20 2:39 ` Ke Zhang
2022-05-20 2:39 ` [PATCH v2 1/1] net/iavf: " Ke Zhang
2022-05-20 2:51 ` [PATCH v2 0/1] " Zhang, Qi Z
2022-05-20 3:00 ` [PATCH v2] net/iavf: " Ke Zhang
3 siblings, 2 replies; 12+ messages in thread
From: Ke Zhang @ 2022-05-20 2:39 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
v2:
Add more explanation of what the issue is and how we fix it
in the commit log.
Ke Zhang (1):
net/iavf: fix Rx queue interrupt setting
drivers/net/iavf/iavf_ethdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--
2.25.1
* [PATCH v2 1/1] net/iavf: fix Rx queue interrupt setting
2022-05-20 2:39 ` [PATCH v2 0/1] " Ke Zhang
@ 2022-05-20 2:39 ` Ke Zhang
2022-05-20 2:51 ` [PATCH v2 0/1] " Zhang, Qi Z
1 sibling, 0 replies; 12+ messages in thread
From: Ke Zhang @ 2022-05-20 2:39 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang, stable
For the Rx queue interrupt setting, when the VF Rx interrupt
is disabled (INTENA=0), there are two ways to write back
descriptors to host memory:
1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 0: completed descriptors are posted to host
memory according to the internal descriptor cache policy
(in other words, when a full cache line is available for
write-back).
An internal descriptor is 16 or 32 bytes and a cache line
is 64 or 128 bytes, per the datasheet:
PCIe Global Config 2 - GLPCI_CNF2 (0x000BE004; RO)
so a full cache line can hold 4 descriptors, which means
the NIC only posts 4 packets to the host once a full cache
line is available.
2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 1: completed descriptors also trigger the ITR,
and following ITR expiration, all leftover completed
descriptors are posted to host memory.
The NIC then posts a packet to the host even if only one
descriptor is completed.
Change 1) to 2) to make sure the VF delivers packets to
the host even when only one Rx packet is ready in hardware.
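As a sanity check on the arithmetic above: with 16-byte descriptors
and a 64-byte cache line, 64 / 16 = 4 completed descriptors fit in
one write-back unit, which matches the "only receive 4 packets once"
symptom in the original report; with 32-byte descriptors and a
128-byte cache line the ratio is the same.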
Fixes: d6bde6b5eae9 ("net/avf: enable Rx interrupt")
Cc: stable@dpdk.org
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_ethdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..17c7720600 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1833,7 +1833,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
IAVF_WRITE_REG(hw,
IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
- 0);
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
IAVF_WRITE_FLUSH(hw);
return 0;
--
2.25.1
* RE: [PATCH v2 0/1] fix Rx queue interrupt setting
2022-05-20 2:39 ` [PATCH v2 0/1] " Ke Zhang
2022-05-20 2:39 ` [PATCH v2 1/1] net/iavf: " Ke Zhang
@ 2022-05-20 2:51 ` Zhang, Qi Z
1 sibling, 0 replies; 12+ messages in thread
From: Zhang, Qi Z @ 2022-05-20 2:51 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Friday, May 20, 2022 10:40 AM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH v2 0/1] fix Rx queue interrupt setting
>
> v2:
> Add more explanation of what the issue is and how we fix it in the commit log.
For a single patch, there is no need for a separate change log; it should be combined with the patch.
>
> Ke Zhang (1):
> net/iavf: fix Rx queue interrupt setting
>
> drivers/net/iavf/iavf_ethdev.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> --
> 2.25.1
* [PATCH v2] net/iavf: fix Rx queue interrupt setting
2022-04-25 8:36 [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1 Ke Zhang
` (2 preceding siblings ...)
2022-05-20 2:39 ` [PATCH v2 0/1] " Ke Zhang
@ 2022-05-20 3:00 ` Ke Zhang
2022-05-20 3:15 ` Zhang, Qi Z
3 siblings, 1 reply; 12+ messages in thread
From: Ke Zhang @ 2022-05-20 3:00 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang, stable
For the Rx queue interrupt setting, when the VF Rx interrupt
is disabled (INTENA=0), there are two ways to write back
descriptors to host memory:
1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 0: completed descriptors are posted to host
memory according to the internal descriptor cache policy
(in other words, when a full cache line is available for
write-back).
An internal descriptor is 16 or 32 bytes and a cache line
is 64 or 128 bytes, per the datasheet:
PCIe Global Config 2 - GLPCI_CNF2 (0x000BE004; RO)
so a full cache line can hold 4 descriptors, which means
the NIC only posts 4 packets to the host once a full cache
line is available.
2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control
Register to 1: completed descriptors also trigger the ITR,
and following ITR expiration, all leftover completed
descriptors are posted to host memory.
The NIC then posts a packet to the host even if only one
descriptor is completed.
Change 1) to 2) to make sure the VF delivers packets to
the host even when only one Rx packet is ready in hardware.
Fixes: d6bde6b5eae9 ("net/avf: enable Rx interrupt")
Cc: stable@dpdk.org
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
v2:
Add more explanation of what the issue is and how we fix it
in the commit log.
drivers/net/iavf/iavf_ethdev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..17c7720600 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1833,7 +1833,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
IAVF_WRITE_REG(hw,
IAVF_VFINT_DYN_CTLN1(msix_intr - IAVF_RX_VEC_START),
- 0);
+ IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
IAVF_WRITE_FLUSH(hw);
return 0;
--
2.25.1
* RE: [PATCH v2] net/iavf: fix Rx queue interrupt setting
2022-05-20 3:00 ` [PATCH v2] net/iavf: " Ke Zhang
@ 2022-05-20 3:15 ` Zhang, Qi Z
0 siblings, 0 replies; 12+ messages in thread
From: Zhang, Qi Z @ 2022-05-20 3:15 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev
Cc: Zhang, Ke1X, stable
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Friday, May 20, 2022 11:00 AM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH v2] net/iavf: fix Rx queue interrupt setting
>
> For the Rx queue interrupt setting, when the VF Rx interrupt is disabled
> (INTENA=0), there are two ways to write back descriptors to host memory:
>
> 1) Set the WB_ON_ITR bit of the Interrupt Dynamic Control Register to 0:
> completed descriptors are posted to host memory according to the internal
> descriptor cache policy (in other words, when a full cache line is
> available for write-back).
>
> An internal descriptor is 16 or 32 bytes and a cache line is 64 or
> 128 bytes, per the datasheet:
> PCIe Global Config 2 - GLPCI_CNF2 (0x000BE004; RO), so a full cache line
> can hold 4 descriptors, which means the NIC only posts 4 packets to the
> host once a full cache line is available.
>
> 2) Set the WB_ON_ITR bit of the Interrupt Dynamic Control Register to 1:
> completed descriptors also trigger the ITR, and following ITR expiration,
> all leftover completed descriptors are posted to host memory.
>
> The NIC then posts a packet to the host even if only one descriptor is
> completed.
>
> Change 1) to 2) to make sure the VF delivers packets to the host even
> when only one Rx packet is ready in hardware.
>
> Fixes: d6bde6b5eae9 ("net/avf: enable Rx interrupt")
> Cc: stable@dpdk.org
>
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
* [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
@ 2022-04-02 9:51 Ke Zhang
2022-04-03 2:07 ` Zhang, Qi Z
0 siblings, 1 reply; 12+ messages in thread
From: Ke Zhang @ 2022-04-02 9:51 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, the secondary process
operates on shared memory and changes the function
pointer of the primary process, so the function address
cannot be resolved when the primary process releases the
queues, which results in a crash.
Similar to commit 20b631efe785819eb77aabbf500b3352e5731bdb.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 27 ++++++++++++++-----------
drivers/net/iavf/iavf_rxtx.h | 6 +++---
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_sse.c | 8 ++++----
4 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..197c03cd31 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
+const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops;
+const struct iavf_txq_ops *iavf_txq_release_mbufs_ops;
+
static const struct iavf_rxq_ops def_rxq_ops = {
.release_mbufs = release_rxq_mbufs,
};
@@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ iavf_rxq_release_mbufs_ops = &def_rxq_ops;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ iavf_txq_release_mbufs_ops = &def_txq_ops;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +946,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +974,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +989,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1003,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1037,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -2825,7 +2828,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- (void)iavf_rxq_vec_setup(rxq);
+ (void)iavf_rxq_vec_setup(rxq, &iavf_rxq_release_mbufs_ops);
}
if (dev->data->scattered_rx) {
@@ -3008,11 +3011,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
continue;
#ifdef CC_AVX512_SUPPORT
if (use_avx512)
- iavf_txq_vec_setup_avx512(txq);
+ iavf_txq_vec_setup_avx512(&iavf_txq_release_mbufs_ops);
else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#endif
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..7df501d784 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -657,8 +657,8 @@ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
-int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops);
+int iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -687,7 +687,7 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..08de34c87c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2017,9 +2017,9 @@ static const struct iavf_txq_ops avx512_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &avx512_vec_txq_ops;
+ *txq_ops = &avx512_vec_txq_ops;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..a782bed2e0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &sse_vec_txq_ops;
+ *txq_ops = &sse_vec_txq_ops;
return 0;
}
int __rte_cold
-iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops)
{
- rxq->ops = &sse_vec_rxq_ops;
+ *rxq_ops = &sse_vec_rxq_ops;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
* RE: [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
2022-04-02 9:51 [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
@ 2022-04-03 2:07 ` Zhang, Qi Z
0 siblings, 0 replies; 12+ messages in thread
From: Zhang, Qi Z @ 2022-04-03 2:07 UTC (permalink / raw)
To: Zhang, Ke1X, Li, Xiaoyun, Wu, Jingjing, Xing, Beilei, dev; +Cc: Zhang, Ke1X
> -----Original Message-----
> From: Ke Zhang <ke1x.zhang@intel.com>
> Sent: Saturday, April 2, 2022 5:51 PM
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: Zhang, Ke1X <ke1x.zhang@intel.com>
> Subject: [PATCH] net/iavf: fix iavf crashed on dev_stop when running in
> multi-process mode
>
> In a multi-process environment, the secondary process operates on shared
> memory and changes the function pointer of the primary process, so the
> function address cannot be resolved when the primary process releases
> the queues, which results in a crash.
>
> Similar to commit 20b631efe785819eb77aabbf500b3352e5731bdb.
>
> Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
> ---
> drivers/net/iavf/iavf_rxtx.c | 27 ++++++++++++++-----------
> drivers/net/iavf/iavf_rxtx.h | 6 +++---
> drivers/net/iavf/iavf_rxtx_vec_avx512.c | 4 ++--
> drivers/net/iavf/iavf_rxtx_vec_sse.c | 8 ++++----
> 4 files changed, 24 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 16e8d021f9..197c03cd31 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
> }
> }
>
> +const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops; const struct
> +iavf_txq_ops *iavf_txq_release_mbufs_ops;
> +
> static const struct iavf_rxq_ops def_rxq_ops = {
> .release_mbufs = release_rxq_mbufs,
> };
> @@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> rxq->q_set = true;
> dev->data->rx_queues[queue_idx] = rxq;
> rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
> - rxq->ops = &def_rxq_ops;
> + iavf_rxq_release_mbufs_ops = &def_rxq_ops;
This is not correct.
Now we replace the per-queue ops with a global ops, which is not expected.
Please reference the method of the patch below:
commit 0ed16e01313e1f8930dc6a52b22159b20269d4e0
Author: Steve Yang <stevex.yang@intel.com>
Date: Mon Feb 28 09:48:59 2022 +0000
net/iavf: fix function pointer in multi-process
...
>
> diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> index 717a227b2c..a782bed2e0 100644
> --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> @@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops
> = { };
>
> int __rte_cold
> -iavf_txq_vec_setup(struct iavf_tx_queue *txq)
> +iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
> {
> - txq->ops = &sse_vec_txq_ops;
> + *txq_ops = &sse_vec_txq_ops;
> return 0;
> }
>
> int __rte_cold
> -iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
> +iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops
> +**rxq_ops)
> {
> - rxq->ops = &sse_vec_rxq_ops;
> + *rxq_ops = &sse_vec_rxq_ops;
> return iavf_rxq_vec_setup_default(rxq); }
There seems to be a lot of redundancy in iavf_rxtx_vec_sse.c.
Can we move iavf_r(t)xq_vec_setup / sse_vec_r(t)xq_ops into iavf_rxtx.c and delete iavf_r(t)x_queue_release_mbufs_sse()?
Btw, we can keep this patch unchanged; a separate patch to refactor the code is expected.
>
> --
> 2.25.1
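For reference, that commit replaces the pointer stored in the shared
queue structure with a small integer selector that each process resolves
against its own copy of a constant ops table. A rough sketch with
illustrative names (the exact identifiers in commit 0ed16e01313e may
differ):

/* Per-process constant table: the function addresses in it are
 * always valid in the process that reads them. */
static const struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
	[IAVF_REL_MBUFS_DEFAULT] = {
		.release_mbufs = release_rxq_mbufs },
	[IAVF_REL_MBUFS_SSE_VEC] = {
		.release_mbufs = iavf_rx_queue_release_mbufs_sse },
};

/* The shared queue structure stores only the index... */
rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;

/* ...and every process dereferences through its local table. */
iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);

An index stays valid across address spaces, so a secondary process can
no longer corrupt the primary's dispatch.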
* [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode
@ 2022-04-02 9:33 Ke Zhang
0 siblings, 0 replies; 12+ messages in thread
From: Ke Zhang @ 2022-04-02 9:33 UTC (permalink / raw)
To: xiaoyun.li, jingjing.wu, beilei.xing, dev; +Cc: Ke Zhang
In a multi-process environment, the secondary process
operates on shared memory and changes the function
pointer of the primary process, so the function address
cannot be resolved when the primary process releases the
queues, which results in a crash.
Similar to commit 20b631efe785819eb77aabbf500b3352e5731bdb.
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 27 ++++++++++++++-----------
drivers/net/iavf/iavf_rxtx.h | 6 +++---
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 4 ++--
drivers/net/iavf/iavf_rxtx_vec_sse.c | 8 ++++----
4 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..197c03cd31 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -362,6 +362,9 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
+const struct iavf_rxq_ops *iavf_rxq_release_mbufs_ops;
+const struct iavf_txq_ops *iavf_txq_release_mbufs_ops;
+
static const struct iavf_rxq_ops def_rxq_ops = {
.release_mbufs = release_rxq_mbufs,
};
@@ -674,7 +677,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ iavf_rxq_release_mbufs_ops = &def_rxq_ops;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +814,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ iavf_txq_release_mbufs_ops = &def_txq_ops;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +946,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +974,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +989,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1003,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops->release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1037,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops->release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1045,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops->release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -2825,7 +2828,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- (void)iavf_rxq_vec_setup(rxq);
+ (void)iavf_rxq_vec_setup(rxq, &iavf_rxq_release_mbufs_ops);
}
if (dev->data->scattered_rx) {
@@ -3008,11 +3011,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
continue;
#ifdef CC_AVX512_SUPPORT
if (use_avx512)
- iavf_txq_vec_setup_avx512(txq);
+ iavf_txq_vec_setup_avx512(&iavf_txq_release_mbufs_ops);
else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#else
- iavf_txq_vec_setup(txq);
+ iavf_txq_vec_setup(&iavf_txq_release_mbufs_ops);
#endif
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebbce8..7df501d784 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -657,8 +657,8 @@ uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
-int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
-int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops);
+int iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
@@ -687,7 +687,7 @@ uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+int iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 7319d4cb65..08de34c87c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2017,9 +2017,9 @@ static const struct iavf_txq_ops avx512_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup_avx512(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &avx512_vec_txq_ops;
+ *txq_ops = &avx512_vec_txq_ops;
return 0;
}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 717a227b2c..a782bed2e0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1219,16 +1219,16 @@ static const struct iavf_txq_ops sse_vec_txq_ops = {
};
int __rte_cold
-iavf_txq_vec_setup(struct iavf_tx_queue *txq)
+iavf_txq_vec_setup(const struct iavf_txq_ops **txq_ops)
{
- txq->ops = &sse_vec_txq_ops;
+ *txq_ops = &sse_vec_txq_ops;
return 0;
}
int __rte_cold
-iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq, const struct iavf_rxq_ops **rxq_ops)
{
- rxq->ops = &sse_vec_rxq_ops;
+ *rxq_ops = &sse_vec_rxq_ops;
return iavf_rxq_vec_setup_default(rxq);
}
--
2.25.1
End of thread (newest message: 2022-05-20 3:15 UTC)
Thread overview: 12+ messages
2022-04-25 8:36 [PATCH] net/iavf: when E810 VF interrupt disable, only receive 4 packets once, fix 4 to 1 Ke Zhang
2022-05-19 9:29 ` [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
2022-05-19 9:30 ` [PATCH v2] net/iavf: fix Rx queue interrupt setting Ke Zhang
2022-05-19 9:56 ` Zhang, Qi Z
2022-05-20 2:39 ` [PATCH v2 0/1] " Ke Zhang
2022-05-20 2:39 ` [PATCH v2 1/1] net/iavf: " Ke Zhang
2022-05-20 2:51 ` [PATCH v2 0/1] " Zhang, Qi Z
2022-05-20 3:00 ` [PATCH v2] net/iavf: " Ke Zhang
2022-05-20 3:15 ` Zhang, Qi Z
-- strict thread matches above, loose matches on Subject: below --
2022-04-02 9:51 [PATCH] net/iavf: fix iavf crashed on dev_stop when running in multi-process mode Ke Zhang
2022-04-03 2:07 ` Zhang, Qi Z
2022-04-02 9:33 Ke Zhang