From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Thu, 8 Jul 2021 17:32:35 +0800
Message-Id: <20210708093239.13896-16-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.21.0.windows.1
In-Reply-To: <20210708093239.13896-1-jiawenwu@trustnetic.com>
References: <20210708093239.13896-1-jiawenwu@trustnetic.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v8 15/19] net/ngbe: add Tx queue start and stop

Initialize the transmit unit, and support starting and stopping the
transmit unit for specified queues.
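Queues whose rte_eth_txconf has tx_deferred_start set are skipped by
ngbe_dev_rxtx_start() and must be started explicitly through the ethdev
API, which dispatches to the new tx_queue_start/tx_queue_stop dev ops.
A minimal usage sketch, assuming one Tx queue and illustrative port,
queue and descriptor-count values (example_deferred_txq() is a
hypothetical helper, not part of this patch):

/* Illustrative only: configure a deferred-start Tx queue, then start and
 * stop it via the generic ethdev API, which invokes the driver's
 * tx_queue_start/tx_queue_stop callbacks added by this patch.
 * Rx-side setup is omitted for brevity.
 */
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
example_deferred_txq(uint16_t port_id)
{
	struct rte_eth_conf port_conf = {0};
	struct rte_eth_dev_info info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_configure(port_id, 0, 1, &port_conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	txconf = info.default_txconf;
	txconf.tx_deferred_start = 1;	/* leave this queue stopped in dev_start */

	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);	/* queue 0 stays stopped here */
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_tx_queue_start(port_id, 0);	/* enable the Tx ring */
	if (ret != 0)
		return ret;

	return rte_eth_dev_tx_queue_stop(port_id, 0);	/* drain and disable it */
}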
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/ngbe.ini |   1 +
 drivers/net/ngbe/base/ngbe_type.h |   1 +
 drivers/net/ngbe/ngbe_ethdev.c    |   3 +
 drivers/net/ngbe/ngbe_ethdev.h    |   7 ++
 drivers/net/ngbe/ngbe_rxtx.c      | 162 +++++++++++++++++++++++++++++-
 drivers/net/ngbe/ngbe_rxtx.h      |   3 +
 6 files changed, 175 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
index 291a542a42..08d5f1b0dc 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Multiprocess aware   = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 3f6698be15..2846a6a2b6 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -190,6 +190,7 @@ struct ngbe_hw {
 
 	u16 nb_rx_queues;
 	u16 nb_tx_queues;
 
+	u32 q_tx_regs[8 * 4];
 	bool is_pf;
 };
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index f88e71b855..f1911bdcbc 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -598,6 +598,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			ETH_LINK_SPEED_10M;
 
 	/* Driver-preferred Rx/Tx parameters */
+	dev_info->default_txportconf.burst_size = 32;
 	dev_info->default_rxportconf.nb_queues = 1;
 	dev_info->default_txportconf.nb_queues = 1;
 	dev_info->default_rxportconf.ring_size = 256;
@@ -1089,6 +1090,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
 	.dev_start                  = ngbe_dev_start,
 	.dev_stop                   = ngbe_dev_stop,
 	.link_update                = ngbe_dev_link_update,
+	.tx_queue_start             = ngbe_dev_tx_queue_start,
+	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
 	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
 	.rx_queue_release           = ngbe_dev_rx_queue_release,
 	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 9e086eea58..5c2aea8d50 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -86,6 +86,13 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
 		       uint8_t queue, uint8_t msix_vector);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 6cb0465ac4..63f0647413 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -528,7 +528,32 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
 void
 ngbe_dev_tx_init(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct ngbe_hw *hw;
+	struct ngbe_tx_queue *txq;
+	uint64_t bus_addr;
+	uint16_t i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = ngbe_dev_hw(dev);
+
+	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
+	wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
+
+	/* Setup the Base and Length of the Tx Descriptor Rings */
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+
+		bus_addr = txq->tx_ring_phys_addr;
+		wr32(hw, NGBE_TXBAL(txq->reg_idx),
+			(uint32_t)(bus_addr & BIT_MASK32));
+		wr32(hw, NGBE_TXBAH(txq->reg_idx),
+			(uint32_t)(bus_addr >> 32));
+		wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
+			NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
+		/* Setup the HW Tx Head and TX Tail descriptor pointers */
+		wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
+		wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
+	}
 }
 
 /*
@@ -537,7 +562,140 @@ ngbe_dev_tx_init(struct rte_eth_dev *dev)
 int
 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
+	struct ngbe_hw *hw;
+	struct ngbe_tx_queue *txq;
+	uint32_t dmatxctl;
+	uint16_t i;
+	int ret = 0;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = ngbe_dev_hw(dev);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		/* Setup Transmit Threshold Registers */
+		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
+			NGBE_TXCFG_HTHRESH_MASK |
+			NGBE_TXCFG_WTHRESH_MASK,
+			NGBE_TXCFG_HTHRESH(txq->hthresh) |
+			NGBE_TXCFG_WTHRESH(txq->wthresh));
+	}
+
+	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
+	dmatxctl |= NGBE_DMATXCTRL_ENA;
+	wr32(hw, NGBE_DMATXCTRL, dmatxctl);
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq->tx_deferred_start == 0) {
+			ret = ngbe_dev_tx_queue_start(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+	}
 
 	return -EINVAL;
 }
+
+void
+ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
+}
+
+void
+ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
+
+	/* Wait until Tx Enable ready */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct ngbe_hw *hw = ngbe_dev_hw(dev);
+	struct ngbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until Tx queue is empty */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_NGBE_WAIT_100_US);
+		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
+			     tx_queue_id);
+
+	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
+
+	/* Wait until Tx Enable bit clear */
+	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
+	if (poll_ms == 0)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_delay_us(RTE_NGBE_WAIT_100_US);
+	ngbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 4b824d4ef8..03e98284a8 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -73,6 +73,9 @@ struct ngbe_tx_desc {
 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
 		    sizeof(struct ngbe_rx_desc))
 
+#define RTE_NGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_NGBE_WAIT_100_US               100
+
 #define NGBE_TX_MAX_SEG                    40
 
 #ifndef DEFAULT_TX_FREE_THRESH
-- 
2.21.0.windows.1