From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Qiming Yang <qiming.yang@intel.com>
Cc: dev@dpdk.org, beilei.xing@intel.com
Subject: Re: [dpdk-dev] [PATCH v2] net/i40e: fix queue related exception handling
Date: Tue, 19 May 2020 09:56:53 +0800 [thread overview]
Message-ID: <20200519015653.GB38911@intel.com> (raw)
In-Reply-To: <20200518054553.19306-1-qiming.yang@intel.com>
On 05/18, Qiming Yang wrote:
>Queue start failure and queue stop failure should be handled differently.
>When a queue fails to start, all subsequent actions should be terminated
>and the queues that have already been started should be cleared. But at
>the queue stop stage, one queue failing to stop should not prevent the
>remaining queues from being stopped. This patch fixes that in both PF
>and VF.
>
>Fixes: b6583ee40265 ("i40e: full VMDQ pools support")
>Fixes: 3f6a696f1054 ("i40evf: queue start and stop")
Need to cc stable.
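Something like the following trailer block in the commit log is what I mean
here (stable@dpdk.org is the usual Cc for DPDK stable backports; adjust if a
specific stable branch is wanted):

    Fixes: b6583ee40265 ("i40e: full VMDQ pools support")
    Fixes: 3f6a696f1054 ("i40evf: queue start and stop")
    Cc: stable@dpdk.org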
>
>Signed-off-by: Qiming Yang <qiming.yang@intel.com>
>---
> drivers/net/i40e/i40e_ethdev.c | 116 ++++++++------------------------------
> drivers/net/i40e/i40e_ethdev_vf.c | 2 -
> drivers/net/i40e/i40e_rxtx.c | 28 +++++++++
> 3 files changed, 53 insertions(+), 93 deletions(-)
>
>diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
>index 9fbda1c..024178d 100644
>--- a/drivers/net/i40e/i40e_ethdev.c
>+++ b/drivers/net/i40e/i40e_ethdev.c
>@@ -2276,6 +2276,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> uint32_t intr_vector = 0;
> struct i40e_vsi *vsi;
>+ uint16_t nb_rxq, nb_txq;
>
> hw->adapter_stopped = 0;
>
>@@ -2307,7 +2308,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> ret = i40e_dev_rxtx_init(pf);
> if (ret != I40E_SUCCESS) {
> PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
>- goto err_up;
>+ return ret;
> }
>
> /* Map queues with MSIX interrupt */
>@@ -2332,10 +2333,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
> }
>
> /* Enable all queues which have been configured */
>- ret = i40e_dev_switch_queues(pf, TRUE);
>- if (ret != I40E_SUCCESS) {
>- PMD_DRV_LOG(ERR, "Failed to enable VSI");
>- goto err_up;
>+ for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
>+ ret = i40e_dev_rx_queue_start(dev, nb_rxq);
>+ if (ret)
>+ goto rx_err;
>+ }
>+
>+ for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) {
>+ ret = i40e_dev_tx_queue_start(dev, nb_txq);
>+ if (ret)
>+ goto tx_err;
> }
>
> /* Enable receiving broadcast packets */
>@@ -2365,7 +2372,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
> if (ret != I40E_SUCCESS) {
> PMD_DRV_LOG(ERR, "fail to set loopback link");
>- goto err_up;
>+ goto tx_err;
> }
> }
>
>@@ -2373,7 +2380,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> ret = i40e_apply_link_speed(dev);
> if (I40E_SUCCESS != ret) {
> PMD_DRV_LOG(ERR, "Fail to apply link setting");
>- goto err_up;
>+ goto tx_err;
> }
>
> if (!rte_intr_allow_others(intr_handle)) {
>@@ -2416,9 +2423,12 @@ i40e_dev_start(struct rte_eth_dev *dev)
>
> return I40E_SUCCESS;
>
>-err_up:
>- i40e_dev_switch_queues(pf, FALSE);
>- i40e_dev_clear_queues(dev);
>+tx_err:
>+ for (i = 0; i < nb_txq; i++)
>+ i40e_dev_tx_queue_stop(dev, i);
>+rx_err:
>+ for (i = 0; i < nb_rxq; i++)
>+ i40e_dev_rx_queue_stop(dev, i);
>
> return ret;
> }
>@@ -2442,7 +2452,11 @@ i40e_dev_stop(struct rte_eth_dev *dev)
> }
>
> /* Disable all queues */
>- i40e_dev_switch_queues(pf, FALSE);
>+ for (i = 0; i < dev->data->nb_tx_queues; i++)
>+ i40e_dev_tx_queue_stop(dev, i);
>+
>+ for (i = 0; i < dev->data->nb_rx_queues; i++)
>+ i40e_dev_rx_queue_stop(dev, i);
>
> /* un-map queues with interrupt registers */
> i40e_vsi_disable_queues_intr(main_vsi);
>@@ -6278,33 +6292,6 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
> return I40E_SUCCESS;
> }
>
>-/* Swith on or off the tx queues */
>-static int
>-i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
>-{
>- struct rte_eth_dev_data *dev_data = pf->dev_data;
>- struct i40e_tx_queue *txq;
>- struct rte_eth_dev *dev = pf->adapter->eth_dev;
>- uint16_t i;
>- int ret;
>-
>- for (i = 0; i < dev_data->nb_tx_queues; i++) {
>- txq = dev_data->tx_queues[i];
>- /* Don't operate the queue if not configured or
>- * if starting only per queue */
>- if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
>- continue;
>- if (on)
>- ret = i40e_dev_tx_queue_start(dev, i);
>- else
>- ret = i40e_dev_tx_queue_stop(dev, i);
>- if ( ret != I40E_SUCCESS)
>- return ret;
>- }
>-
>- return I40E_SUCCESS;
>-}
>-
> int
> i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
> {
>@@ -6356,59 +6343,6 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
>
> return I40E_SUCCESS;
> }
>-/* Switch on or off the rx queues */
>-static int
>-i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
>-{
>- struct rte_eth_dev_data *dev_data = pf->dev_data;
>- struct i40e_rx_queue *rxq;
>- struct rte_eth_dev *dev = pf->adapter->eth_dev;
>- uint16_t i;
>- int ret;
>-
>- for (i = 0; i < dev_data->nb_rx_queues; i++) {
>- rxq = dev_data->rx_queues[i];
>- /* Don't operate the queue if not configured or
>- * if starting only per queue */
>- if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
>- continue;
>- if (on)
>- ret = i40e_dev_rx_queue_start(dev, i);
>- else
>- ret = i40e_dev_rx_queue_stop(dev, i);
>- if (ret != I40E_SUCCESS)
>- return ret;
>- }
>-
>- return I40E_SUCCESS;
>-}
>-
>-/* Switch on or off all the rx/tx queues */
>-int
>-i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
>-{
>- int ret;
>-
>- if (on) {
>- /* enable rx queues before enabling tx queues */
>- ret = i40e_dev_switch_rx_queues(pf, on);
>- if (ret) {
>- PMD_DRV_LOG(ERR, "Failed to switch rx queues");
>- return ret;
>- }
>- ret = i40e_dev_switch_tx_queues(pf, on);
>- } else {
>- /* Stop tx queues before stopping rx queues */
>- ret = i40e_dev_switch_tx_queues(pf, on);
>- if (ret) {
>- PMD_DRV_LOG(ERR, "Failed to switch tx queues");
>- return ret;
>- }
>- ret = i40e_dev_switch_rx_queues(pf, on);
>- }
>-
>- return ret;
>-}
>
> /* Initialize VSI for TX */
> static int
>diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
>index c34f520..2472610 100644
>--- a/drivers/net/i40e/i40e_ethdev_vf.c
>+++ b/drivers/net/i40e/i40e_ethdev_vf.c
>@@ -789,7 +789,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
> PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
>- return -1;
> }
> }
>
>@@ -797,7 +796,6 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
> PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
>- return -1;
> }
> }
>
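Side note for the archive: with the early returns dropped, the stop loops
here just log the failure and carry on, so the remaining queues still get
stopped. Trimmed to the relevant part, the resulting i40evf_stop_queues()
behaviour is roughly:

	/* Stop TX queues first; a failure is logged but does not abort */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i40evf_dev_tx_queue_stop(dev, i) != 0)
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
	}

	/* Then stop RX queues the same way */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i40evf_dev_rx_queue_stop(dev, i) != 0)
			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
	}

which matches the per-queue stop behaviour on the PF side in i40e_dev_stop().
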
>diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
>index f6d23c9..035eb35 100644
>--- a/drivers/net/i40e/i40e_rxtx.c
>+++ b/drivers/net/i40e/i40e_rxtx.c
>@@ -1570,6 +1570,15 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> PMD_INIT_FUNC_TRACE();
>
> rxq = dev->data->rx_queues[rx_queue_id];
>+ if (!rxq || !rxq->q_set) {
>+ PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
>+ rx_queue_id);
>+ return -EINVAL;
>+ }
>+
>+ if (rxq->rx_deferred_start)
>+ PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
>+ rx_queue_id);
>
> err = i40e_alloc_rx_queue_mbufs(rxq);
> if (err) {
>@@ -1602,6 +1611,11 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>
> rxq = dev->data->rx_queues[rx_queue_id];
>+ if (!rxq || !rxq->q_set) {
>+ PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
>+ rx_queue_id);
>+ return -EINVAL;
>+ }
>
> /*
> * rx_queue_id is queue id application refers to, while
>@@ -1630,6 +1644,15 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> PMD_INIT_FUNC_TRACE();
>
> txq = dev->data->tx_queues[tx_queue_id];
>+ if (!txq || !txq->q_set) {
>+ PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
>+ tx_queue_id);
>+ return -EINVAL;
>+ }
>+
>+ if (txq->tx_deferred_start)
>+ PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
>+ tx_queue_id);
>
> /*
> * tx_queue_id is queue id application refers to, while
>@@ -1654,6 +1677,11 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>
> txq = dev->data->tx_queues[tx_queue_id];
>+ if (!txq || !txq->q_set) {
>+ PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
>+ tx_queue_id);
>+ return -EINVAL;
>+ }
>
> /*
> * tx_queue_id is queue id application refers to, while
>--
>2.9.5
>
Acked-by: Xiaolong Ye <xiaolong.ye@intel.com>
Applied to dpdk-next-net-intel, Thanks.