From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Thu, 8 Jul 2021 17:32:36 +0800
Message-Id: <20210708093239.13896-17-jiawenwu@trustnetic.com>
In-Reply-To: <20210708093239.13896-1-jiawenwu@trustnetic.com>
References: <20210708093239.13896-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v8 16/19] net/ngbe: add Rx queue start and stop

Initialize the receive unit, and add support for starting and stopping
the receive unit on specified queues.
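For reference, a queue configured for deferred start is brought up through
the generic ethdev API, which dispatches into the new hooks. A minimal
sketch (port_id, nb_rxd, socket_id and mb_pool are illustrative
placeholders, not part of this patch):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxconf rxconf;

    /* take the driver's defaults, but defer this queue's start */
    rte_eth_dev_info_get(port_id, &dev_info);
    rxconf = dev_info.default_rxconf;
    rxconf.rx_deferred_start = 1;
    rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, mb_pool);

    rte_eth_dev_start(port_id);              /* deferred queue stays stopped */
    rte_eth_dev_rx_queue_start(port_id, 0);  /* ends up in ngbe_dev_rx_queue_start() */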
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/base/ngbe_dummy.h |  15 ++
 drivers/net/ngbe/base/ngbe_hw.c    | 105 ++++++++++++++
 drivers/net/ngbe/base/ngbe_hw.h    |   4 +
 drivers/net/ngbe/base/ngbe_type.h  |   4 +
 drivers/net/ngbe/ngbe_ethdev.c     |   5 +
 drivers/net/ngbe/ngbe_ethdev.h     |   6 +
 drivers/net/ngbe/ngbe_rxtx.c       | 215 ++++++++++++++++++++++++++++-
 drivers/net/ngbe/ngbe_rxtx.h       |   8 ++
 8 files changed, 359 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
index 387bb16aec..8863acef0d 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -59,6 +59,18 @@ static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
     return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_enable_rx_dma_dummy(struct ngbe_hw *TUP0, u32 TUP1)
+{
+    return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_disable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
+{
+    return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_enable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
+{
+    return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_acquire_swfw_sync_dummy(struct ngbe_hw *TUP0,
         u32 TUP1)
 {
@@ -167,6 +179,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
     hw->mac.start_hw = ngbe_mac_start_hw_dummy;
     hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
     hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
+    hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
+    hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
+    hw->mac.enable_sec_rx_path = ngbe_mac_enable_sec_rx_path_dummy;
     hw->mac.acquire_swfw_sync = ngbe_mac_acquire_swfw_sync_dummy;
     hw->mac.release_swfw_sync = ngbe_mac_release_swfw_sync_dummy;
     hw->mac.setup_link = ngbe_mac_setup_link_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 87c02a8f1f..6b575fc67b 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -536,6 +536,63 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)
     ngbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ * ngbe_disable_sec_rx_path - Stops the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
+{
+#define NGBE_MAX_SECRX_POLL 4000
+
+    int i;
+    u32 secrxreg;
+
+    DEBUGFUNC("ngbe_disable_sec_rx_path");
+
+
+    secrxreg = rd32(hw, NGBE_SECRXCTL);
+    secrxreg |= NGBE_SECRXCTL_XDSA;
+    wr32(hw, NGBE_SECRXCTL, secrxreg);
+    for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) {
+        secrxreg = rd32(hw, NGBE_SECRXSTAT);
+        if (!(secrxreg & NGBE_SECRXSTAT_RDY))
+            /* Use interrupt-safe sleep just in case */
+            usec_delay(10);
+        else
+            break;
+    }
+
+    /* For informational purposes only */
+    if (i >= NGBE_MAX_SECRX_POLL)
+        DEBUGOUT("Rx unit being enabled before security "
+             "path fully disabled. Continuing with init.\n");
+
+    return 0;
+}
+
+/**
+ * ngbe_enable_sec_rx_path - Enables the receive data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the receive data path.
+ **/
+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
+{
+    u32 secrxreg;
+
+    DEBUGFUNC("ngbe_enable_sec_rx_path");
+
+    secrxreg = rd32(hw, NGBE_SECRXCTL);
+    secrxreg &= ~NGBE_SECRXCTL_XDSA;
+    wr32(hw, NGBE_SECRXCTL, secrxreg);
+    ngbe_flush(hw);
+
+    return 0;
+}
+
 /**
  * ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
  * @hw: pointer to hardware struct
@@ -756,6 +813,21 @@ void ngbe_disable_rx(struct ngbe_hw *hw)
         wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
 }
 
+void ngbe_enable_rx(struct ngbe_hw *hw)
+{
+    u32 pfdtxgswc;
+
+    wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, NGBE_MACRXCFG_ENA);
+    wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
+
+    if (hw->mac.set_lben) {
+        pfdtxgswc = rd32(hw, NGBE_PSRCTL);
+        pfdtxgswc |= NGBE_PSRCTL_LBENA;
+        wr32(hw, NGBE_PSRCTL, pfdtxgswc);
+        hw->mac.set_lben = false;
+    }
+}
+
 /**
  * ngbe_set_mac_type - Sets MAC type
  * @hw: pointer to the HW structure
@@ -802,6 +874,36 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)
     return err;
 }
 
+/**
+ * ngbe_enable_rx_dma - Enable the Rx DMA unit
+ * @hw: pointer to hardware structure
+ * @regval: register value to write to RXCTRL
+ *
+ * Enables the Rx DMA unit
+ **/
+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)
+{
+    DEBUGFUNC("ngbe_enable_rx_dma");
+
+    /*
+     * Workaround silicon errata when enabling the Rx datapath.
+     * If traffic is incoming before we enable the Rx unit, it could hang
+     * the Rx DMA unit. Therefore, make sure the security engine is
+     * completely disabled prior to enabling the Rx unit.
+     */
+
+    hw->mac.disable_sec_rx_path(hw);
+
+    if (regval & NGBE_PBRXCTL_ENA)
+        ngbe_enable_rx(hw);
+    else
+        ngbe_disable_rx(hw);
+
+    hw->mac.enable_sec_rx_path(hw);
+
+    return 0;
+}
+
 void ngbe_map_device_id(struct ngbe_hw *hw)
 {
     u16 oem = hw->sub_system_id & NGBE_OEM_MASK;
@@ -886,11 +988,14 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
     mac->init_hw = ngbe_init_hw;
     mac->reset_hw = ngbe_reset_hw_em;
     mac->start_hw = ngbe_start_hw;
+    mac->enable_rx_dma = ngbe_enable_rx_dma;
     mac->get_mac_addr = ngbe_get_mac_addr;
     mac->stop_hw = ngbe_stop_hw;
     mac->acquire_swfw_sync = ngbe_acquire_swfw_sync;
     mac->release_swfw_sync = ngbe_release_swfw_sync;
+    mac->disable_sec_rx_path = ngbe_disable_sec_rx_path;
+    mac->enable_sec_rx_path = ngbe_enable_sec_rx_path;
 
     /* RAR */
     mac->set_rar = ngbe_set_rar;
     mac->clear_rar = ngbe_clear_rar;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index 791d15fbec..17a0a03c88 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -34,6 +34,8 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
          u32 enable_addr);
 s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index);
 s32 ngbe_init_rx_addrs(struct ngbe_hw *hw);
+s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw);
+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw);
 s32 ngbe_validate_mac_addr(u8 *mac_addr);
 
 s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask);
@@ -46,10 +48,12 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw);
 s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw);
 s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw);
 void ngbe_disable_rx(struct ngbe_hw *hw);
+void ngbe_enable_rx(struct ngbe_hw *hw);
 s32 ngbe_init_shared_code(struct ngbe_hw *hw);
 s32 ngbe_set_mac_type(struct ngbe_hw *hw);
 s32 ngbe_init_ops_pf(struct ngbe_hw *hw);
 s32 ngbe_init_phy(struct ngbe_hw *hw);
+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval);
 void ngbe_map_device_id(struct ngbe_hw *hw);
 
 #endif /* _NGBE_HW_H_ */
diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
index 2846a6a2b6..28540e4ba0 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -97,6 +97,9 @@ struct ngbe_mac_info {
     s32 (*start_hw)(struct ngbe_hw *hw);
     s32 (*stop_hw)(struct ngbe_hw *hw);
     s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
+    s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
+    s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
+    s32 (*enable_sec_rx_path)(struct ngbe_hw *hw);
     s32 (*acquire_swfw_sync)(struct ngbe_hw *hw, u32 mask);
     void (*release_swfw_sync)(struct ngbe_hw *hw, u32 mask);
 
@@ -190,6 +193,7 @@ struct ngbe_hw {
     u16 nb_rx_queues;
     u16 nb_tx_queues;
 
+    u32 q_rx_regs[8 * 4];
     u32 q_tx_regs[8 * 4];
     bool is_pf;
 };
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index f1911bdcbc..2a40dbd184 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -569,6 +569,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
     dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
     dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+    dev_info->min_rx_bufsize = 1024;
+    dev_info->max_rx_pktlen = 15872;
 
     dev_info->default_rxconf = (struct rte_eth_rxconf) {
         .rx_thresh = {
@@ -598,6 +600,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                 ETH_LINK_SPEED_10M;
 
     /* Driver-preferred Rx/Tx parameters */
+    dev_info->default_rxportconf.burst_size = 32;
     dev_info->default_txportconf.burst_size = 32;
     dev_info->default_rxportconf.nb_queues = 1;
     dev_info->default_txportconf.nb_queues = 1;
@@ -1090,6 +1093,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
     .dev_start                  = ngbe_dev_start,
     .dev_stop                   = ngbe_dev_stop,
     .link_update                = ngbe_dev_link_update,
+    .rx_queue_start             = ngbe_dev_rx_queue_start,
+    .rx_queue_stop              = ngbe_dev_rx_queue_stop,
     .tx_queue_start             = ngbe_dev_tx_queue_start,
     .tx_queue_stop              = ngbe_dev_tx_queue_stop,
     .rx_queue_setup             = ngbe_dev_rx_queue_setup,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 5c2aea8d50..049d8fe71d 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -86,9 +86,15 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
+void ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
 void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
 void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
 
+int ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
 int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 63f0647413..47b5926e34 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -511,15 +511,111 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev)
     }
 }
 
+static int
+ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
+{
+    struct ngbe_rx_entry *rxe = rxq->sw_ring;
+    uint64_t dma_addr;
+    unsigned int i;
+
+    /* Initialize software ring entries */
+    for (i = 0; i < rxq->nb_rx_desc; i++) {
+        /* the ring can also be modified by hardware */
+        volatile struct ngbe_rx_desc *rxd;
+        struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+        if (mbuf == NULL) {
+            PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
+                     (unsigned int)rxq->queue_id,
+                     (unsigned int)rxq->port_id);
+            return -ENOMEM;
+        }
+
+        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+        mbuf->port = rxq->port_id;
+
+        dma_addr =
+            rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+        rxd = &rxq->rx_ring[i];
+        NGBE_RXD_HDRADDR(rxd, 0);
+        NGBE_RXD_PKTADDR(rxd, dma_addr);
+        rxe[i].mbuf = mbuf;
+    }
+
+    return 0;
+}
+
 /*
  * Initializes Receive Unit.
  */
 int
 ngbe_dev_rx_init(struct rte_eth_dev *dev)
 {
-    RTE_SET_USED(dev);
+    struct ngbe_hw *hw;
+    struct ngbe_rx_queue *rxq;
+    uint64_t bus_addr;
+    uint32_t fctrl;
+    uint32_t hlreg0;
+    uint32_t srrctl;
+    uint16_t buf_size;
+    uint16_t i;
+
+    PMD_INIT_FUNC_TRACE();
+    hw = ngbe_dev_hw(dev);
+
+    /*
+     * Make sure receives are disabled while setting
+     * up the Rx context (registers, descriptor rings, etc.).
+     */
+    wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
+    wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
+
+    /* Enable receipt of broadcasted frames */
+    fctrl = rd32(hw, NGBE_PSRCTL);
+    fctrl |= NGBE_PSRCTL_BCA;
+    wr32(hw, NGBE_PSRCTL, fctrl);
+
+    hlreg0 = rd32(hw, NGBE_SECRXCTL);
+    hlreg0 &= ~NGBE_SECRXCTL_XDSA;
+    wr32(hw, NGBE_SECRXCTL, hlreg0);
+
+    wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+          NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
 
-    return -EINVAL;
+    /* Setup Rx queues */
+    for (i = 0; i < dev->data->nb_rx_queues; i++) {
+        rxq = dev->data->rx_queues[i];
+
+        /* Setup the Base and Length of the Rx Descriptor Rings */
+        bus_addr = rxq->rx_ring_phys_addr;
+        wr32(hw, NGBE_RXBAL(rxq->reg_idx),
+             (uint32_t)(bus_addr & BIT_MASK32));
+        wr32(hw, NGBE_RXBAH(rxq->reg_idx),
+             (uint32_t)(bus_addr >> 32));
+        wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
+        wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
+
+        srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
+
+        /* Set if packets are dropped when no descriptors available */
+        if (rxq->drop_en)
+            srrctl |= NGBE_RXCFG_DROP;
+
+        /*
+         * Configure the Rx buffer size in the PKTLEN field of
+         * the RXCFG register of the queue.
+         * The value is in 1 KB resolution. Valid values can be from
+         * 1 KB to 16 KB.
+         */
+        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+            RTE_PKTMBUF_HEADROOM);
+        buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
+        srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
+
+        wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
+    }
+
+    return 0;
 }
 
 /*
@@ -564,7 +660,9 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
     struct ngbe_hw *hw;
     struct ngbe_tx_queue *txq;
+    struct ngbe_rx_queue *rxq;
     uint32_t dmatxctl;
+    uint32_t rxctrl;
     uint16_t i;
     int ret = 0;
 
@@ -594,7 +692,39 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
         }
     }
 
-    return -EINVAL;
+    for (i = 0; i < dev->data->nb_rx_queues; i++) {
+        rxq = dev->data->rx_queues[i];
+        if (rxq->rx_deferred_start == 0) {
+            ret = ngbe_dev_rx_queue_start(dev, i);
+            if (ret < 0)
+                return ret;
+        }
+    }
+
+    /* Enable Receive engine */
+    rxctrl = rd32(hw, NGBE_PBRXCTL);
+    rxctrl |= NGBE_PBRXCTL_ENA;
+    hw->mac.enable_rx_dma(hw, rxctrl);
+
+    return 0;
+}
+
+void
+ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
+{
+    u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+    *(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
+    *(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
+    *(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
+}
+
+void
+ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
+{
+    u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+    wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
+    wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
+    wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
 }
 
 void
@@ -615,6 +745,85 @@ ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
     wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
 }
 
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+    struct ngbe_hw *hw = ngbe_dev_hw(dev);
+    struct ngbe_rx_queue *rxq;
+    uint32_t rxdctl;
+    int poll_ms;
+
+    PMD_INIT_FUNC_TRACE();
+
+    rxq = dev->data->rx_queues[rx_queue_id];
+
+    /* Allocate buffers for descriptor rings */
+    if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
+        PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+                 rx_queue_id);
+        return -1;
+    }
+    rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+    rxdctl |= NGBE_RXCFG_ENA;
+    wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+    /* Wait until Rx Enable ready */
+    poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+    do {
+        rte_delay_ms(1);
+        rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+    } while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
+    if (poll_ms == 0)
+        PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+    rte_wmb();
+    wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
+    wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+    dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+    return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+    struct ngbe_hw *hw = ngbe_dev_hw(dev);
+    struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+    struct ngbe_rx_queue *rxq;
+    uint32_t rxdctl;
+    int poll_ms;
+
+    PMD_INIT_FUNC_TRACE();
+
+    rxq = dev->data->rx_queues[rx_queue_id];
+
+    ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
+    wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
+
+    /* Wait until Rx Enable bit clear */
+    poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
+    do {
+        rte_delay_ms(1);
+        rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
+    } while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
+    if (poll_ms == 0)
+        PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+    rte_delay_us(RTE_NGBE_WAIT_100_US);
+    ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+    ngbe_rx_queue_release_mbufs(rxq);
+    ngbe_reset_rx_queue(adapter, rxq);
+    dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+    return 0;
+}
+
 /*
  * Start Transmit Units for specified queue.
  */
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 03e98284a8..e35ef166d3 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -43,6 +43,14 @@ struct ngbe_rx_desc {
     } qw1; /* also as r.hdr_addr */
 };
 
+/* @ngbe_rx_desc.qw0 */
+#define NGBE_RXD_PKTADDR(rxd, v) \
+    (((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
+
+/* @ngbe_rx_desc.qw1 */
+#define NGBE_RXD_HDRADDR(rxd, v) \
+    (((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+
 /*****************************************************************************
  * Transmit Descriptor
  *****************************************************************************/
-- 
2.21.0.windows.1
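A note on the PKTLEN programming in ngbe_dev_rx_init() above: the usable
mbuf data room is rounded down to the register's 1 KB granularity before
being written, so the hardware never writes past the advertised buffer
size. A small worked example of the arithmetic, assuming the common
defaults of a 2176-byte data room and a 128-byte RTE_PKTMBUF_HEADROOM
(the driver's ROUND_DOWN() floors to a power-of-two multiple, i.e.
x & ~1023 for the 1 KB case):

    uint16_t buf_size;

    buf_size = 2176 - RTE_PKTMBUF_HEADROOM;    /* 2048 bytes usable */
    buf_size = buf_size & ~((0x1 << 10) - 1);  /* still 2048 -> PKTLEN = 2 KB */
    /* a 3000-byte data room (2872 usable) would likewise clamp to 2048 */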
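And a sketch of the runtime stop/start cycle these hooks enable, again with
a placeholder port_id: rte_eth_dev_rx_queue_stop() lands in
ngbe_dev_rx_queue_stop(), which saves the queue registers, releases the
ring's mbufs, and resets the queue, so the following start reallocates
buffers from a clean state:

    /* quiesce Rx queue 0, e.g. before reworking application state */
    if (rte_eth_dev_rx_queue_stop(port_id, 0) != 0)
        rte_exit(EXIT_FAILURE, "cannot stop Rx queue\n");

    /* ... queue is stopped here; its descriptor ring has been reset ... */

    if (rte_eth_dev_rx_queue_start(port_id, 0) != 0)
        rte_exit(EXIT_FAILURE, "cannot restart Rx queue\n");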