From mboxrd@z Thu Jan 1 00:00:00 1970
From: Zhe Tao
To: dev@dpdk.org
Cc: wenzhuo.lu@intel.com, zhe.tao@intel.com, konstantin.ananyev@intel.com,
	bruce.richardson@intel.com, jing.d.chen@intel.com, cunming.liang@intel.com,
	jingjing.wu@intel.com, helin.zhang@intel.com
Date: Tue, 7 Jun 2016 14:12:44 +0800
Message-Id: <1465279968-5515-5-git-send-email-zhe.tao@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1465279968-5515-1-git-send-email-zhe.tao@intel.com>
References: <1465279968-5515-1-git-send-email-zhe.tao@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/8] ixgbe: implement device reset on VF
List-Id: patches and discussions about DPDK

Implement the device reset function.

1. Add fake RX/TX functions.
2. The reset function stops RX/TX by replacing the RX/TX functions with
   the fake ones and taking the per-queue locks to make sure the regular
   RX/TX has finished.
3. After RX/TX has stopped, reset the VF port, then release the locks
   and restore the RX/TX functions.

Signed-off-by: Wenzhuo Lu
---
 doc/guides/rel_notes/release_16_07.rst |   9 +++
 drivers/net/ixgbe/ixgbe_ethdev.c       | 108 ++++++++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h       |  12 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c         |  42 ++++++++++++-
 4 files changed, 168 insertions(+), 3 deletions(-)

diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst
index a761e3c..d36c4b1 100644
--- a/doc/guides/rel_notes/release_16_07.rst
+++ b/doc/guides/rel_notes/release_16_07.rst
@@ -53,6 +53,15 @@ New Features
   VF. To handle this link up/down event, add the mailbox interruption
   support to receive the message.
 
+* **Added device reset support for ixgbe VF.**
+
+  Added the device reset API. The application can call this API to reset
+  the VF port when it is not working.
+  Based on the mailbox interruption support, when the VF receives a control
+  message from the PF it means the PF link state has changed. The VF uses
+  the reset callback in the message handler to notify the application, and
+  the application then calls the device reset API to reset the VF port.
+
 
 Resolved Issues
 ---------------
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index fd2682f..1e3520b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -381,6 +381,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 					 struct rte_eth_udp_tunnel *udp_tunnel);
 
+static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -586,6 +588,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.reta_query           = ixgbe_dev_rss_reta_query,
 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+	.dev_reset            = ixgbevf_dev_reset,
 };
 
 /* store statistics names and its offset in stats structure  */
@@ -4060,7 +4063,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 		ETH_VLAN_EXTEND_MASK;
 	ixgbevf_vlan_offload_set(dev, mask);
 
-	ixgbevf_dev_rxtx_start(dev);
+	if (ixgbevf_dev_rxtx_start(dev))
+		return -1;
 
 	/* check and configure queue intr-vector mapping */
 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
@@ -7193,6 +7197,108 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 }
 
 static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_adapter *adapter =
+		(struct ixgbe_adapter *)dev->data->dev_private;
+	int diag = 0;
+	uint32_t vteiam;
+	uint16_t i;
+	struct ixgbe_rx_queue *rxq;
+	struct ixgbe_tx_queue *txq;
+
+	/* Nothing needs to be done if the device is not started. */
+	if (!dev->data->dev_started)
+		return 0;
+
+	PMD_DRV_LOG(DEBUG, "Link up/down event detected.");
+
+	/**
+	 * Stop RX/TX with fake functions and locks.
+	 * The fake functions make the RX/TX locking easier.
+	 */
+	adapter->rx_backup = dev->rx_pkt_burst;
+	adapter->tx_backup = dev->tx_pkt_burst;
+	dev->rx_pkt_burst = ixgbevf_recv_pkts_fake;
+	dev->tx_pkt_burst = ixgbevf_xmit_pkts_fake;
+
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			rte_spinlock_lock(&rxq->rx_lock);
+		}
+
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			rte_spinlock_lock(&txq->tx_lock);
+		}
+
+	/* Perform the VF reset. */
+	do {
+		dev->data->dev_started = 0;
+		ixgbevf_dev_stop(dev);
+		if (dev->data->dev_conf.intr_conf.lsc == 0)
+			diag = ixgbe_dev_link_update(dev, 0);
+		if (diag) {
+			PMD_INIT_LOG(INFO, "Ixgbe VF reset: "
+				     "Failed to update link.");
+		}
+		rte_delay_ms(1000);
+
+		diag = ixgbevf_dev_start(dev);
+		/* If the device fails to start, stop/start it again. */
+		if (diag) {
+			PMD_INIT_LOG(ERR, "Ixgbe VF reset: "
+				     "Failed to start device.");
+			continue;
+		}
+		dev->data->dev_started = 1;
+		ixgbevf_dev_stats_reset(dev);
+		if (dev->data->dev_conf.intr_conf.lsc == 0)
+			diag = ixgbe_dev_link_update(dev, 0);
+		if (diag) {
+			PMD_INIT_LOG(INFO, "Ixgbe VF reset: "
+				     "Failed to update link.");
+			diag = 0;
+		}
+
+		/**
+		 * When the PF link is down, there is a chance
+		 * that the VF cannot operate its registers. Check
+		 * whether the registers have been written
+		 * successfully. If not, repeat stop/start until
+		 * the PF link is up, in other words, until the
+		 * registers can be written.
+		 */
+		vteiam = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+		/* See ixgbevf_intr_enable() for the expected value. */
+	} while (diag || vteiam != IXGBE_VF_IRQ_ENABLE_MASK);
+
+	/**
+	 * Release the locks for the queues.
+	 * Restore the RX/TX functions.
+	 */
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			rte_spinlock_unlock(&rxq->rx_lock);
+		}
+
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			rte_spinlock_unlock(&txq->tx_lock);
+		}
+
+	dev->rx_pkt_burst = adapter->rx_backup;
+	dev->tx_pkt_burst = adapter->tx_backup;
+
+	return 0;
+}
+
+static int
 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
 {
 	uint32_t eicr;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 701107b..d50fad4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -289,6 +289,8 @@ struct ixgbe_adapter {
 	struct rte_timecounter      systime_tc;
 	struct rte_timecounter      rx_tstamp_tc;
 	struct rte_timecounter      tx_tstamp_tc;
+	eth_rx_burst_t              rx_backup;
+	eth_tx_burst_t              tx_backup;
 };
 
 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
@@ -377,7 +379,7 @@ int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
 
-void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+int ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
 
 uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
@@ -409,6 +411,14 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 
+uint16_t ixgbevf_recv_pkts_fake(void *rx_queue,
+		struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t ixgbevf_xmit_pkts_fake(void *tx_queue,
+		struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 uint16_t ixgbe_xmit_pkts_lock(void *tx_queue,
 		struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a45d115..b4e7659 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -5181,7 +5181,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 /*
  * [VF] Start Transmit and Receive Units.
  */
-void __attribute__((cold))
+int __attribute__((cold))
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw     *hw;
@@ -5218,7 +5218,15 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
+#ifndef RTE_NEXT_ABI
 			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+#else
+		{
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
+			if (dev->data->dev_conf.txmode.lock_mode)
+				return -1;
+		}
+#endif
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -5235,11 +5243,21 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
+#ifndef RTE_NEXT_ABI
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+#else
+		{
 			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
+			if (dev->data->dev_conf.rxmode.lock_mode)
+				return -1;
+		}
+#endif
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
 	}
+
+	return 0;
 }
 
 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
@@ -5290,3 +5308,25 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
 {
 	return -1;
 }
+
+/**
+ * A fake function to stop reception.
+ */
+uint16_t
+ixgbevf_recv_pkts_fake(void __rte_unused *rx_queue,
+		       struct rte_mbuf __rte_unused **rx_pkts,
+		       uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
+/**
+ * A fake function to stop transmission.
+ */
+uint16_t
+ixgbevf_xmit_pkts_fake(void __rte_unused *tx_queue,
+		       struct rte_mbuf __rte_unused **tx_pkts,
+		       uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
-- 
2.1.4
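
The reset sequence above only works if the regular RX/TX paths honour the same per-queue locks. Those locked burst functions (for example ixgbe_xmit_pkts_lock, declared in ixgbe_ethdev.h above) are added by other patches of this series; the sketch below is only an assumed illustration of their shape, not the driver's actual code. The idea is that the burst call runs under the queue spinlock, so once ixgbevf_dev_reset() has swapped in the fake burst functions and taken every lock, no packet can be in flight while the port is stopped and restarted.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_spinlock.h>

/* Simplified stand-in for the driver's RX queue structure (assumption);
 * the real ixgbe_rx_queue embeds rx_lock next to its hardware state. */
struct locked_rx_queue {
	rte_spinlock_t rx_lock;  /* taken by ixgbevf_dev_reset() during reset */
	void *hw_queue;          /* state consumed by the real burst function */
};

/* Regular (unlocked) receive burst function provided by the driver. */
uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts);

/* Hypothetical locked RX burst: identical to the regular one except that
 * it holds the per-queue spinlock for the duration of the burst. */
static uint16_t
recv_pkts_lock(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct locked_rx_queue *rxq = rx_queue;
	uint16_t nb_rx;

	rte_spinlock_lock(&rxq->rx_lock);
	nb_rx = ixgbe_recv_pkts(rxq->hw_queue, rx_pkts, nb_pkts);
	rte_spinlock_unlock(&rxq->rx_lock);

	return nb_rx;
}

Swapping in ixgbevf_recv_pkts_fake before taking the locks keeps the datapath from spinning on a lock that the reset path may hold for a second or more.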
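
On the application side, the flow described in the release note might look roughly like the sketch below. The event id RTE_ETH_EVENT_INTR_RESET and the wrapper rte_eth_dev_reset() are assumptions here: the application-facing API and the reset notification are introduced by other patches of this series, not by this one, and their exact names may differ.

#include <stdio.h>
#include <rte_ethdev.h>

/* Invoked from the interrupt thread when the VF driver reports that the
 * PF link state changed (event name assumed, see note above). */
static void
vf_reset_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	(void)param;

	if (type != RTE_ETH_EVENT_INTR_RESET)
		return;

	/* Reset the VF port from the application, as the release note asks.
	 * rte_eth_dev_reset() is the assumed application-facing wrapper
	 * around the dev_reset op added by this patch. */
	if (rte_eth_dev_reset(port_id) != 0)
		printf("port %u: VF reset failed\n", (unsigned int)port_id);
}

/* Register the callback once, e.g. right after configuring the port. */
void
register_vf_reset_cb(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}

In a real application the callback would usually only set a flag and let a control thread perform the reset, since doing heavyweight work from the interrupt-thread callback is generally discouraged.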