From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Wed, 14 Oct 2020 13:54:43 +0800
Message-Id: <20201014055517.1214386-23-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20201014055517.1214386-1-jiawenwu@trustnetic.com>
References: <20201014055517.1214386-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v3 22/56] net/txgbe: add Rx and Tx start and stop
List-Id: DPDK patches and discussions <dev.dpdk.org>
Sender: "dev" <dev-bounces@dpdk.org>

Add receive and transmit unit start and stop for a specified queue.
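With these callbacks wired into the ops table, an application can stop and
restart individual queues at runtime through the generic ethdev API. A
minimal sketch (the helper name and queue id 0 are illustrative, assuming
port_id is a started txgbe port with queue 0 configured):

#include <rte_ethdev.h>

/* Illustrative helper, not part of this patch: quiesce and restart
 * Rx/Tx queue 0 on a port. Each call dispatches to the corresponding
 * txgbe rx/tx queue start/stop op added below.
 */
static int
restart_queue0(uint16_t port_id)
{
	int ret;

	/* Stop Rx first so no new packets arrive while Tx drains. */
	ret = rte_eth_dev_rx_queue_stop(port_id, 0);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port_id, 0);
	if (ret != 0)
		return ret;

	/* Restart in the reverse order. */
	ret = rte_eth_dev_tx_queue_start(port_id, 0);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, 0);
}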
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini  |   1 +
 drivers/net/txgbe/base/txgbe_type.h |   3 +
 drivers/net/txgbe/txgbe_ethdev.c    |   6 +
 drivers/net/txgbe/txgbe_ethdev.h    |  15 ++
 drivers/net/txgbe/txgbe_rxtx.c      | 256 ++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_rxtx.h      |  12 ++
 6 files changed, 293 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 707f64131..e76e9af46 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Link status event    = Y
+Queue start/stop     = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
 Unicast MAC filter   = Y
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 747ada0f9..5237200d4 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -469,6 +469,9 @@ struct txgbe_hw {
 		TXGBE_SW_RESET,
 		TXGBE_GLOBAL_RESET
 	} reset_type;
+
+	u32 q_rx_regs[128 * 4];
+	u32 q_tx_regs[128 * 4];
 };
 
 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 6186cace1..dd13f73b2 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -566,6 +566,8 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	txgbe_dev_free_queues(dev);
+
 	dev->dev_ops = NULL;
 
 	/* disable uio intr before callback unregister */
@@ -1322,6 +1324,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.dev_infos_get              = txgbe_dev_info_get,
 	.dev_set_link_up            = txgbe_dev_set_link_up,
 	.dev_set_link_down          = txgbe_dev_set_link_down,
+	.rx_queue_start             = txgbe_dev_rx_queue_start,
+	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
+	.tx_queue_start             = txgbe_dev_tx_queue_start,
+	.tx_queue_stop              = txgbe_dev_tx_queue_stop,
 	.rx_queue_setup             = txgbe_dev_rx_queue_setup,
 	.rx_queue_release           = txgbe_dev_rx_queue_release,
 	.tx_queue_setup             = txgbe_dev_tx_queue_setup,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 6636b6e9a..1a29281a8 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -80,6 +80,8 @@ struct txgbe_adapter {
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_free_queues(struct rte_eth_dev *dev);
+
 void txgbe_dev_rx_queue_release(void *rxq);
 
 void txgbe_dev_tx_queue_release(void *txq);
@@ -97,6 +99,19 @@ int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
+int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			uint8_t queue, uint8_t msix_vector);
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 707d5b2e4..d6ba1545c 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -10,6 +10,9 @@
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include 
@@ -622,12 +625,64 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
+void
+txgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+	unsigned int i;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+		dev->data->rx_queues[i] = NULL;
+	}
+	dev->data->nb_rx_queues = 0;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+		dev->data->tx_queues[i] = NULL;
+	}
+	dev->data->nb_tx_queues = 0;
+}
+
 void __rte_cold
 txgbe_set_rx_function(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
 }
 
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+	struct txgbe_rx_entry *rxe = rxq->sw_ring;
+	uint64_t dma_addr;
+	unsigned int i;
+
+	/* Initialize software ring entries */
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		volatile struct txgbe_rx_desc *rxd;
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+		if (mbuf == NULL) {
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+				     (unsigned int)rxq->queue_id);
+			return -ENOMEM;
+		}
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+		rxd = &rxq->rx_ring[i];
+		TXGBE_RXD_HDRADDR(rxd, 0);
+		TXGBE_RXD_PKTADDR(rxd, dma_addr);
+		rxe[i].mbuf = mbuf;
+	}
+
+	return 0;
+}
+
 /**
  * txgbe_get_rscctl_maxdesc
  *
@@ -958,3 +1013,204 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 	}
 }
 
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	/* Allocate buffers for descriptor rings */
+	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+			     rx_queue_id);
+		return -1;
+	}
+	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	rxdctl |= TXGBE_RXCFG_ENA;
+	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+	/* Wait until RX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+	rte_wmb();
+	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	struct txgbe_rx_queue *rxq;
+	uint32_t rxdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+
+	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+	/* Wait until RX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+	txgbe_rx_queue_release_mbufs(rxq);
+	txgbe_reset_rx_queue(adapter, rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+	/* Wait until TX Enable ready */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not enable "
+			     "Tx Queue %d", tx_queue_id);
+
+	rte_wmb();
+	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	struct txgbe_tx_queue *txq;
+	uint32_t txdctl;
+	uint32_t txtdh, txtdt;
+	int poll_ms;
+
+	PMD_INIT_FUNC_TRACE();
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Wait until TX queue is empty */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_us(RTE_TXGBE_WAIT_100_US);
+		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+	} while (--poll_ms && (txtdh != txtdt));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR,
+			     "Tx Queue %d is not empty when stopping.",
+			     tx_queue_id);
+
+	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+	/* Wait until TX Enable bit clear */
+	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+	do {
+		rte_delay_ms(1);
+		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+	if (!poll_ms)
+		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+			     tx_queue_id);
+
+	rte_delay_us(RTE_TXGBE_WAIT_100_US);
+	txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+	if (txq->ops != NULL) {
+		txq->ops->release_mbufs(txq);
+		txq->ops->reset(txq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index be165dd19..5b991e304 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -42,6 +42,14 @@ struct txgbe_rx_desc {
 	} qw1; /* also as r.hdr_addr */
 };
 
+/* @txgbe_rx_desc.qw0 */
+#define TXGBE_RXD_PKTADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @txgbe_rx_desc.qw1 */
+#define TXGBE_RXD_HDRADDR(rxd, v) \
+	(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
 /**
  * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
  **/
@@ -59,6 +67,9 @@ struct txgbe_tx_desc {
 
 #define TXGBE_PTID_MASK		0xFF
 
+#define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS	10
+#define RTE_TXGBE_WAIT_100_US			100
+
 #define TXGBE_TX_MAX_SEG	40
 
 /**
@@ -140,6 +151,7 @@ struct txgbe_tx_queue {
 	volatile uint32_t	*tdt_reg_addr; /**< Address of TDT register. */
 	volatile uint32_t	*tdc_reg_addr; /**< Address of TDC register. */
 	uint16_t		nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t		tx_tail;       /**< current value of TDT reg. */
 	/**< Start freeing TX buffers if there are less free descriptors than
 	 *   this value.
 	 */
-- 
2.18.4