From: Ouyang Changchun
To: dev@dpdk.org
Date: Tue, 20 May 2014 13:25:10 +0800
Message-Id: <1400563511-19848-3-git-send-email-changchun.ouyang@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <1400563511-19848-1-git-send-email-changchun.ouyang@intel.com>
References: <1400563511-19848-1-git-send-email-changchun.ouyang@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/3] ixgbe: Implement queue start and stop functionality in IXGBE PMD

This patch implements per-queue start and stop functionality in the IXGBE
PMD; it also enables hardware loopback for VMDQ mode in the IXGBE PMD.
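For reference, a minimal usage sketch of the deferred-start flow from the
application side (a sketch only: it assumes the generic ethdev API added in
patch 1/3 of this series -- the start_rx_per_q field of rte_eth_rxconf and
rte_eth_dev_rx_queue_start/stop() -- and illustrative names such as port_id,
port_conf, nb_rxd, nb_txd, socket_id and mb_pool that are set up elsewhere;
headers and most error handling are trimmed):

    /* Mark RX queue 0 as deferred: rte_eth_dev_start() will skip it. */
    struct rte_eth_rxconf rx_conf;
    struct rte_eth_txconf tx_conf;

    memset(&rx_conf, 0, sizeof(rx_conf));
    memset(&tx_conf, 0, sizeof(tx_conf));   /* zero thresholds => PMD defaults */
    rx_conf.start_rx_per_q = 1;

    rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rx_conf, mb_pool);
    rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, &tx_conf);

    rte_eth_dev_start(port_id);             /* RX queue 0 stays disabled */

    /* Enable the queue on demand; in this PMD the call lands in
     * ixgbe_dev_rx_queue_start(), which allocates the ring mbufs and
     * sets RXDCTL.ENABLE.
     */
    if (rte_eth_dev_rx_queue_start(port_id, 0) != 0)
        rte_exit(EXIT_FAILURE, "Failed to start RX queue 0\n");

    /* ... receive traffic ... */

    rte_eth_dev_rx_queue_stop(port_id, 0);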
Signed-off-by: Ouyang Changchun
---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   4 +
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h |   8 ++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   | 233 ++++++++++++++++++++++++++++++------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h   |   6 +
 4 files changed, 215 insertions(+), 36 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 49ff0d1..62a6d77 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -275,6 +275,10 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
 	.vlan_offload_set     = ixgbe_vlan_offload_set,
 	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+	.rx_queue_start       = ixgbe_dev_rx_queue_start,
+	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
+	.tx_queue_start       = ixgbe_dev_tx_queue_start,
+	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
 	.rx_queue_count       = ixgbe_dev_rx_queue_count,
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 7c6139b..ae52c8e 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -245,6 +245,14 @@ void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
 
 void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 55414b9..2a98051 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1588,7 +1588,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  * descriptors should meet the following condition:
  *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
-#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MIN_RING_DESC 32
 #define IXGBE_MAX_RING_DESC 4096
 
 /*
@@ -1836,6 +1836,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
+	txq->start_tx_per_q = tx_conf->start_tx_per_q;
 
 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -2078,6 +2079,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
 				  0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
+	rxq->start_rx_per_q = rx_conf->start_rx_per_q;
 
 	/*
 	 * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -3025,6 +3027,14 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
 	}
 
+	/* PFDMA Tx General Switch Control: enables VMDQ loopback */
+	if (cfg->enable_loop_back) {
+		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+		for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++) {
+			IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+		}
+	}
+
 	IXGBE_WRITE_FLUSH(hw);
 }
@@ -3234,7 +3244,6 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	uint32_t rxcsum;
 	uint16_t buf_size;
 	uint16_t i;
-	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3289,11 +3298,6 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
 
-		/* Allocate buffers for descriptor rings */
-		ret = ixgbe_alloc_rx_queue_mbufs(rxq);
-		if (ret)
-			return ret;
-
 		/*
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
@@ -3500,10 +3504,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 	struct igb_rx_queue *rxq;
 	uint32_t txdctl;
 	uint32_t dmatxctl;
-	uint32_t rxdctl;
 	uint32_t rxctrl;
 	uint16_t i;
-	int poll_ms;
 
 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3526,55 +3528,214 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-
-		/* Wait until TX Enable ready */
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			poll_ms = 10;
-			do {
-				rte_delay_ms(1);
-				txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
-			if (!poll_ms)
-				PMD_INIT_LOG(ERR, "Could not enable "
-					     "Tx Queue %d\n", i);
+		if (!txq->start_tx_per_q) {
+			ixgbe_dev_tx_queue_start(dev, i);
 		}
 	}
+
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
+		if (!rxq->start_rx_per_q) {
+			ixgbe_dev_rx_queue_start(dev, i);
+		}
+	}
+
+	/* Enable Receive engine */
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		rxctrl |= IXGBE_RXCTRL_DMBYPS;
+	rxctrl |= IXGBE_RXCTRL_RXEN;
+	hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+	/* If loopback mode is enabled for 82599, set up the link accordingly */
+	if (hw->mac.type == ixgbe_mac_82599EB &&
+	    dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+		ixgbe_setup_loopback_link_82599(hw);
+
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ixgbe_hw *hw;
+	struct igb_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rx_queue_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rx_queue_id];
+
+		/* Allocate buffers for descriptor rings */
+		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+			PMD_INIT_LOG(ERR,
+				"Could not alloc mbuf for queue: %d\n",
+				rx_queue_id);
+			return -1;
+		}
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
 
 		/* Wait until RX Enable ready */
-		poll_ms = 10;
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
 		do {
 			rte_delay_ms(1);
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
 			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Rx Queue %d\n", i);
+				     "Rx Queue %d\n", rx_queue_id);
 		rte_wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
-	}
+	} else
+		return -1;
 
-	/* Enable Receive engine */
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		rxctrl |= IXGBE_RXCTRL_DMBYPS;
-	rxctrl |= IXGBE_RXCTRL_RXEN;
-	hw->mac.ops.enable_rx_dma(hw, rxctrl);
+	return 0;
+}
 
-	/* If loopback mode is enabled for 82599, set up the link accordingly */
-	if (hw->mac.type == ixgbe_mac_82599EB &&
-			dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-		ixgbe_setup_loopback_link_82599(hw);
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ixgbe_hw *hw;
+	struct igb_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (rx_queue_id < dev->data->nb_rx_queues) {
+		rxq = dev->data->rx_queues[rx_queue_id];
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+		/* Wait until RX Enable bit clears */
+		poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+		do {
+			rte_delay_ms(1);
+			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+		} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+		if (!poll_ms)
+			PMD_INIT_LOG(ERR, "Could not disable "
+				     "Rx Queue %d\n", rx_queue_id);
+
+		rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+		ixgbe_rx_queue_release_mbufs(rxq);
+		ixgbe_reset_rx_queue(rxq);
+	} else
+		return -1;
+
+	return 0;
+}
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ixgbe_hw *hw;
+	struct igb_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[tx_queue_id];
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl |= IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until TX Enable ready */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_ms(1);
+				txdctl = IXGBE_READ_REG(hw,
+					IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR, "Could not enable "
+					     "Tx Queue %d\n", tx_queue_id);
+		}
+		rte_wmb();
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+	} else
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ixgbe_hw *hw;
+	struct igb_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (tx_queue_id < dev->data->nb_tx_queues) {
+		txq = dev->data->tx_queues[tx_queue_id];
+
+		/* Wait until TX queue is empty */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_us(RTE_IXGBE_WAIT_100_US);
+				txtdh = IXGBE_READ_REG(hw,
+					IXGBE_TDH(txq->reg_idx));
+				txtdt = IXGBE_READ_REG(hw,
+					IXGBE_TDT(txq->reg_idx));
+			} while (--poll_ms && (txtdh != txtdt));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR,
					"Tx Queue %d is not empty when stopping.\n",
+					tx_queue_id);
+		}
+
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+		txdctl &= ~IXGBE_TXDCTL_ENABLE;
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+		/* Wait until TX Enable bit clears */
+		if (hw->mac.type == ixgbe_mac_82599EB) {
+			poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+			do {
+				rte_delay_ms(1);
+				txdctl = IXGBE_READ_REG(hw,
+					IXGBE_TXDCTL(txq->reg_idx));
+			} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+			if (!poll_ms)
+				PMD_INIT_LOG(ERR, "Could not disable "
+					     "Tx Queue %d\n", tx_queue_id);
+		}
+
+		if (txq->ops != NULL) {
+			txq->ops->release_mbufs(txq);
+			txq->ops->reset(txq);
+		}
+	} else
+		return -1;
+
+	return 0;
+}
 
 /*
  * [VF] Initializes Receive Unit.
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 446eeb7..f9d0177 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -67,6 +67,10 @@
 #define rte_packet_prefetch(p)  do {} while(0)
 #endif
 
+#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_IXGBE_WAIT_100_US               100
+#define RTE_IXGBE_VMTXSW_REGISTER_COUNT     2
+
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -129,6 +133,7 @@ struct igb_rx_queue {
 	uint8_t             port_id;  /**< Device port identifier. */
 	uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+	uint8_t             start_rx_per_q;
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
@@ -193,6 +198,7 @@ struct igb_tx_queue {
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	struct ixgbe_txq_ops *ops;          /**< txq ops */
+	uint8_t start_tx_per_q;
 };
 
 struct ixgbe_txq_ops {
-- 
1.9.0
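---
Note for testers: a minimal sketch of how an application would request the
VMDQ loopback path exercised by ixgbe_vmdq_rx_hw_configure() above (a sketch
only: it assumes the enable_loop_back field of struct rte_eth_vmdq_rx_conf;
port_id, nb_rx_queues and nb_tx_queues are illustrative, and the pool count
is just one valid choice):

    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
    port_conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_64_POOLS;
    /* Ask the PMD to set PFDTXGSWC.LBEN and the VMTXSW registers */
    port_conf.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;

    rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &port_conf);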