From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Jiawen Wu <jiawenwu@trustnetic.com>, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v5 23/24] net/ngbe: start and stop RxTx
Date: Mon, 14 Jun 2021 23:44:29 +0300
Message-ID: <93ff48bb-434c-29b4-b0b0-41e673dfbd26@oktetlabs.ru>
In-Reply-To: <20210602094108.1575640-24-jiawenwu@trustnetic.com>

On 6/2/21 12:41 PM, Jiawen Wu wrote:
> Support to start and stop receive and transmit unit for specified
> queues.

Before this patch, an attempt to set up an Rx or Tx queue with
deferred start should return an error.
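
For example, a minimal sketch of such a check (hypothetical
placement, reusing the standard rx_conf parameter of
ngbe_dev_rx_queue_setup; the Tx case would mirror it):

    if (rx_conf->rx_deferred_start != 0) {
        PMD_INIT_LOG(ERR, "Rx deferred start is not supported yet");
        return -EINVAL;
    }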

> 
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
>   doc/guides/nics/features/ngbe.ini  |   1 +
>   drivers/net/ngbe/base/ngbe_dummy.h |  15 ++
>   drivers/net/ngbe/base/ngbe_hw.c    | 105 ++++++++++
>   drivers/net/ngbe/base/ngbe_hw.h    |   4 +
>   drivers/net/ngbe/base/ngbe_type.h  |   5 +
>   drivers/net/ngbe/ngbe_ethdev.c     |  10 +
>   drivers/net/ngbe/ngbe_ethdev.h     |  15 ++
>   drivers/net/ngbe/ngbe_rxtx.c       | 307 +++++++++++++++++++++++++++++
>   drivers/net/ngbe/ngbe_rxtx.h       |   3 +
>   9 files changed, 465 insertions(+)
> 
> diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
> index 443c6691a3..43b6b2c2c7 100644
> --- a/doc/guides/nics/features/ngbe.ini
> +++ b/doc/guides/nics/features/ngbe.ini
> @@ -7,6 +7,7 @@
>   Speed capabilities   = Y
>   Link status          = Y
>   Link status event    = Y
> +Queue start/stop     = Y
>   Jumbo frame          = Y
>   Scattered Rx         = Y
>   TSO                  = Y
> diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h
> index dfc7b13192..384631b4f1 100644
> --- a/drivers/net/ngbe/base/ngbe_dummy.h
> +++ b/drivers/net/ngbe/base/ngbe_dummy.h
> @@ -59,6 +59,18 @@ static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
>   {
>   	return NGBE_ERR_OPS_DUMMY;
>   }
> +static inline s32 ngbe_mac_enable_rx_dma_dummy(struct ngbe_hw *TUP0, u32 TUP1)
> +{
> +	return NGBE_ERR_OPS_DUMMY;
> +}
> +static inline s32 ngbe_mac_disable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
> +{
> +	return NGBE_ERR_OPS_DUMMY;
> +}
> +static inline s32 ngbe_mac_enable_sec_rx_path_dummy(struct ngbe_hw *TUP0)
> +{
> +	return NGBE_ERR_OPS_DUMMY;
> +}
>   static inline s32 ngbe_mac_acquire_swfw_sync_dummy(struct ngbe_hw *TUP0,
>   					u32 TUP1)
>   {
> @@ -167,6 +179,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
>   	hw->mac.start_hw = ngbe_mac_start_hw_dummy;
>   	hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
>   	hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
> +	hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
> +	hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
> +	hw->mac.enable_sec_rx_path = ngbe_mac_enable_sec_rx_path_dummy;
>   	hw->mac.acquire_swfw_sync = ngbe_mac_acquire_swfw_sync_dummy;
>   	hw->mac.release_swfw_sync = ngbe_mac_release_swfw_sync_dummy;
>   	hw->mac.setup_link = ngbe_mac_setup_link_dummy;
> diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
> index b0bc714741..030068f3f7 100644
> --- a/drivers/net/ngbe/base/ngbe_hw.c
> +++ b/drivers/net/ngbe/base/ngbe_hw.c
> @@ -536,6 +536,63 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)
>   	ngbe_release_eeprom_semaphore(hw);
>   }
>   
> +/**
> + *  ngbe_disable_sec_rx_path - Stops the receive data path
> + *  @hw: pointer to hardware structure
> + *
> + *  Stops the receive data path and waits for the HW to internally empty
> + *  the Rx security block
> + **/
> +s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw)
> +{
> +#define NGBE_MAX_SECRX_POLL 4000
> +
> +	int i;
> +	u32 secrxreg;
> +
> +	DEBUGFUNC("ngbe_disable_sec_rx_path");
> +
> +
> +	secrxreg = rd32(hw, NGBE_SECRXCTL);
> +	secrxreg |= NGBE_SECRXCTL_XDSA;
> +	wr32(hw, NGBE_SECRXCTL, secrxreg);
> +	for (i = 0; i < NGBE_MAX_SECRX_POLL; i++) {
> +		secrxreg = rd32(hw, NGBE_SECRXSTAT);
> +		if (!(secrxreg & NGBE_SECRXSTAT_RDY))
> +			/* Use interrupt-safe sleep just in case */
> +			usec_delay(10);
> +		else
> +			break;
> +	}
> +
> +	/* For informational purposes only */
> +	if (i >= NGBE_MAX_SECRX_POLL)
> +		DEBUGOUT("Rx unit being enabled before security "
> +			 "path fully disabled.  Continuing with init.\n");
> +
> +	return 0;
> +}
> +
> +/**
> + *  ngbe_enable_sec_rx_path - Enables the receive data path
> + *  @hw: pointer to hardware structure
> + *
> + *  Enables the receive data path.
> + **/
> +s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)
> +{
> +	u32 secrxreg;
> +
> +	DEBUGFUNC("ngbe_enable_sec_rx_path");
> +
> +	secrxreg = rd32(hw, NGBE_SECRXCTL);
> +	secrxreg &= ~NGBE_SECRXCTL_XDSA;
> +	wr32(hw, NGBE_SECRXCTL, secrxreg);
> +	ngbe_flush(hw);
> +
> +	return 0;
> +}
> +
>   /**
>    *  ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
>    *  @hw: pointer to hardware struct
> @@ -757,6 +814,21 @@ void ngbe_disable_rx(struct ngbe_hw *hw)
>   	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
>   }
>   
> +void ngbe_enable_rx(struct ngbe_hw *hw)
> +{
> +	u32 pfdtxgswc;
> +
> +	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, NGBE_MACRXCFG_ENA);
> +	wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);
> +
> +	if (hw->mac.set_lben) {
> +		pfdtxgswc = rd32(hw, NGBE_PSRCTL);
> +		pfdtxgswc |= NGBE_PSRCTL_LBENA;
> +		wr32(hw, NGBE_PSRCTL, pfdtxgswc);
> +		hw->mac.set_lben = false;
> +	}
> +}
> +
>   /**
>    *  ngbe_set_mac_type - Sets MAC type
>    *  @hw: pointer to the HW structure
> @@ -803,6 +875,36 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)
>   	return err;
>   }
>   
> +/**
> + *  ngbe_enable_rx_dma - Enable the Rx DMA unit
> + *  @hw: pointer to hardware structure
> + *  @regval: register value to write to RXCTRL
> + *
> + *  Enables the Rx DMA unit
> + **/
> +s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)
> +{
> +	DEBUGFUNC("ngbe_enable_rx_dma");
> +
> +	/*
> +	 * Workaround silicon errata when enabling the Rx datapath.
> +	 * If traffic is incoming before we enable the Rx unit, it could hang
> +	 * the Rx DMA unit.  Therefore, make sure the security engine is
> +	 * completely disabled prior to enabling the Rx unit.
> +	 */
> +
> +	hw->mac.disable_sec_rx_path(hw);
> +
> +	if (regval & NGBE_PBRXCTL_ENA)
> +		ngbe_enable_rx(hw);
> +	else
> +		ngbe_disable_rx(hw);
> +
> +	hw->mac.enable_sec_rx_path(hw);
> +
> +	return 0;
> +}
> +
>   void ngbe_map_device_id(struct ngbe_hw *hw)
>   {
>   	u16 oem = hw->sub_system_id & NGBE_OEM_MASK;
> @@ -887,11 +989,14 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
>   	mac->init_hw = ngbe_init_hw;
>   	mac->reset_hw = ngbe_reset_hw_em;
>   	mac->start_hw = ngbe_start_hw;
> +	mac->enable_rx_dma = ngbe_enable_rx_dma;
>   	mac->get_mac_addr = ngbe_get_mac_addr;
>   	mac->stop_hw = ngbe_stop_hw;
>   	mac->acquire_swfw_sync = ngbe_acquire_swfw_sync;
>   	mac->release_swfw_sync = ngbe_release_swfw_sync;
>   
> +	mac->disable_sec_rx_path = ngbe_disable_sec_rx_path;
> +	mac->enable_sec_rx_path = ngbe_enable_sec_rx_path;
>   	/* RAR */
>   	mac->set_rar = ngbe_set_rar;
>   	mac->clear_rar = ngbe_clear_rar;
> diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
> index 4fee5735ac..01f41fe9b3 100644
> --- a/drivers/net/ngbe/base/ngbe_hw.h
> +++ b/drivers/net/ngbe/base/ngbe_hw.h
> @@ -34,6 +34,8 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
>   			  u32 enable_addr);
>   s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index);
>   s32 ngbe_init_rx_addrs(struct ngbe_hw *hw);
> +s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw);
> +s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw);
>   
>   s32 ngbe_validate_mac_addr(u8 *mac_addr);
>   s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask);
> @@ -46,10 +48,12 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw);
>   s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw);
>   s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw);
>   void ngbe_disable_rx(struct ngbe_hw *hw);
> +void ngbe_enable_rx(struct ngbe_hw *hw);
>   s32 ngbe_init_shared_code(struct ngbe_hw *hw);
>   s32 ngbe_set_mac_type(struct ngbe_hw *hw);
>   s32 ngbe_init_ops_pf(struct ngbe_hw *hw);
>   s32 ngbe_init_phy(struct ngbe_hw *hw);
> +s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval);
>   void ngbe_map_device_id(struct ngbe_hw *hw);
>   
>   #endif /* _NGBE_HW_H_ */
> diff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h
> index 601fb85b91..134d2019e1 100644
> --- a/drivers/net/ngbe/base/ngbe_type.h
> +++ b/drivers/net/ngbe/base/ngbe_type.h
> @@ -102,6 +102,9 @@ struct ngbe_mac_info {
>   	s32 (*start_hw)(struct ngbe_hw *hw);
>   	s32 (*stop_hw)(struct ngbe_hw *hw);
>   	s32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);
> +	s32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);
> +	s32 (*disable_sec_rx_path)(struct ngbe_hw *hw);
> +	s32 (*enable_sec_rx_path)(struct ngbe_hw *hw);
>   	s32 (*acquire_swfw_sync)(struct ngbe_hw *hw, u32 mask);
>   	void (*release_swfw_sync)(struct ngbe_hw *hw, u32 mask);
>   
> @@ -196,6 +199,8 @@ struct ngbe_hw {
>   	u16 nb_rx_queues;
>   	u16 nb_tx_queues;
>   
> +	u32 q_rx_regs[8 * 4];
> +	u32 q_tx_regs[8 * 4];
>   	bool is_pf;
>   };
>   
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 3812663591..2b551c00c7 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -435,6 +435,12 @@ ngbe_dev_start(struct rte_eth_dev *dev)
>   		goto error;
>   	}
>   
> +	err = ngbe_dev_rxtx_start(dev);
> +	if (err < 0) {
> +		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
> +		goto error;
> +	}
> +

This is a part of the device start procedure which is required
even when deferred start is not supported.

Maybe this should be a separate patch which adds Tx queue start
to the device start, and a similar one for Rx.

>   	/* Skip link setup if loopback mode is enabled. */
>   	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
>   		goto skip_link_setup;
> @@ -1116,6 +1122,10 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
>   	.dev_start                  = ngbe_dev_start,
>   	.link_update                = ngbe_dev_link_update,
>   	.dev_supported_ptypes_get   = ngbe_dev_supported_ptypes_get,
> +	.rx_queue_start	            = ngbe_dev_rx_queue_start,
> +	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
> +	.tx_queue_start	            = ngbe_dev_tx_queue_start,
> +	.tx_queue_stop              = ngbe_dev_tx_queue_stop,

These callbacks really belong to the deferred start feature.
Maybe it makes sense to separate Rx and Tx into different patches.

>   	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
>   	.rx_queue_release           = ngbe_dev_rx_queue_release,
>   	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index 0b8dba571b..97ced40e4b 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -78,6 +78,21 @@ int ngbe_dev_rx_init(struct rte_eth_dev *dev);
>   
>   void ngbe_dev_tx_init(struct rte_eth_dev *dev);
>   
> +int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);
> +
> +void ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
> +void ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);
> +void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
> +void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);
> +
> +int ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
> +int ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
> +int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
> +int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
>   uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts);
>   
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index 3f3f2cab06..daa2d7ae4d 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -2236,6 +2236,38 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
>   	return 0;
>   }
>   
> +static int __rte_cold
> +ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
> +{
> +	struct ngbe_rx_entry *rxe = rxq->sw_ring;
> +	uint64_t dma_addr;
> +	unsigned int i;
> +
> +	/* Initialize software ring entries */
> +	for (i = 0; i < rxq->nb_rx_desc; i++) {
> +		volatile struct ngbe_rx_desc *rxd;

Please add a comment to explain why volatile is required.
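
Something along these lines would do (wording is just a
suggestion):

    /*
     * The descriptor ring is in DMA memory shared with the NIC,
     * so volatile keeps the compiler from caching or reordering
     * accesses to the descriptor fields.
     */
    volatile struct ngbe_rx_desc *rxd;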

> +		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
> +
> +		if (mbuf == NULL) {
> +			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",

RX -> Rx

port_id should be logged as well
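
E.g. a sketch keeping the existing log style:

    PMD_INIT_LOG(ERR,
                 "Rx mbuf alloc failed port_id=%u queue_id=%u",
                 (unsigned int)rxq->port_id,
                 (unsigned int)rxq->queue_id);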

> +				     (unsigned int)rxq->queue_id);
> +			return -ENOMEM;
> +		}
> +
> +		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
> +		mbuf->port = rxq->port_id;
> +
> +		dma_addr =
> +			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
> +		rxd = &rxq->rx_ring[i];
> +		NGBE_RXD_HDRADDR(rxd, 0);
> +		NGBE_RXD_PKTADDR(rxd, dma_addr);
> +		rxe[i].mbuf = mbuf;
> +	}
> +
> +	return 0;
> +}
> +
>   void __rte_cold
>   ngbe_set_rx_function(struct rte_eth_dev *dev)
>   {
> @@ -2473,3 +2505,278 @@ ngbe_dev_tx_init(struct rte_eth_dev *dev)
>   	}
>   }
>   
> +/*
> + * Set up link loopback mode Tx->Rx.
> + */
> +static inline void __rte_cold
> +ngbe_setup_loopback_link(struct ngbe_hw *hw)
> +{
> +	PMD_INIT_FUNC_TRACE();
> +
> +	wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_LB, NGBE_MACRXCFG_LB);
> +
> +	msec_delay(50);
> +}

Loopback support is a separate feature.

> +
> +/*
> + * Start Transmit and Receive Units.
> + */
> +int __rte_cold
> +ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
> +{
> +	struct ngbe_hw     *hw;
> +	struct ngbe_tx_queue *txq;
> +	struct ngbe_rx_queue *rxq;
> +	uint32_t dmatxctl;
> +	uint32_t rxctrl;
> +	uint16_t i;
> +	int ret = 0;
> +
> +	PMD_INIT_FUNC_TRACE();
> +	hw = NGBE_DEV_HW(dev);
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		txq = dev->data->tx_queues[i];
> +		/* Setup Transmit Threshold Registers */
> +		wr32m(hw, NGBE_TXCFG(txq->reg_idx),
> +		      NGBE_TXCFG_HTHRESH_MASK |
> +		      NGBE_TXCFG_WTHRESH_MASK,
> +		      NGBE_TXCFG_HTHRESH(txq->hthresh) |
> +		      NGBE_TXCFG_WTHRESH(txq->wthresh));
> +	}
> +
> +	dmatxctl = rd32(hw, NGBE_DMATXCTRL);
> +	dmatxctl |= NGBE_DMATXCTRL_ENA;
> +	wr32(hw, NGBE_DMATXCTRL, dmatxctl);
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		txq = dev->data->tx_queues[i];
> +		if (!txq->tx_deferred_start) {

tx_deferred_start is not a bool, so it should be compared against 0.
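
I.e. something like the following (the same applies to
rx_deferred_start below):

    if (txq->tx_deferred_start == 0) {
        ret = ngbe_dev_tx_queue_start(dev, i);
        if (ret < 0)
            return ret;
    }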

> +			ret = ngbe_dev_tx_queue_start(dev, i);
> +			if (ret < 0)
> +				return ret;
> +		}
> +	}
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		rxq = dev->data->rx_queues[i];
> +		if (!rxq->rx_deferred_start) {

rx_deferred_start is not a bool, so it should be compared against 0.

> +			ret = ngbe_dev_rx_queue_start(dev, i);
> +			if (ret < 0)
> +				return ret;
> +		}
> +	}
> +
> +	/* Enable Receive engine */
> +	rxctrl = rd32(hw, NGBE_PBRXCTL);
> +	rxctrl |= NGBE_PBRXCTL_ENA;
> +	hw->mac.enable_rx_dma(hw, rxctrl);
> +
> +	/* If loopback mode is enabled, set up the link accordingly */
> +	if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
> +		ngbe_setup_loopback_link(hw);

Loopback support is a separate feature. Before this patch,
a request for loopback mode should return an error.
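
For example, a hypothetical guard in dev_configure() (or at the
start of dev_start()) until loopback is really supported:

    if (dev->data->dev_conf.lpbk_mode != 0) {
        PMD_INIT_LOG(ERR, "Loopback mode is not supported");
        return -ENOTSUP;
    }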

> +
> +	return 0;
> +}
> +
> +void
> +ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
> +{
> +	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
> +	*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
> +	*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
> +	*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
> +}
> +
> +void
> +ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
> +{
> +	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
> +	wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
> +	wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
> +	wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
> +}
> +
> +void
> +ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
> +{
> +	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
> +	*(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
> +	*(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
> +	*(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
> +}
> +
> +void
> +ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
> +{
> +	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
> +	wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
> +	wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
> +	wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
> +}
> +
> +/*
> + * Start Receive Units for specified queue.
> + */
> +int __rte_cold
> +ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct ngbe_hw *hw = NGBE_DEV_HW(dev);
> +	struct ngbe_rx_queue *rxq;
> +	uint32_t rxdctl;
> +	int poll_ms;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +
> +	/* Allocate buffers for descriptor rings */
> +	if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
> +		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
> +			     rx_queue_id);
> +		return -1;
> +	}
> +	rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
> +	rxdctl |= NGBE_RXCFG_ENA;
> +	wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
> +
> +	/* Wait until RX Enable ready */

RX -> Rx

> +	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
> +	do {
> +		rte_delay_ms(1);
> +		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
> +	} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
> +	if (!poll_ms)

Compare vs 0
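
I.e. (the same pattern applies to the other poll_ms checks
below):

    if (poll_ms == 0)
        PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
                     rx_queue_id);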

> +		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
> +	rte_wmb();
> +	wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
> +	wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
> +}
> +
> +/*
> + * Stop Receive Units for specified queue.
> + */
> +int __rte_cold
> +ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct ngbe_hw *hw = NGBE_DEV_HW(dev);
> +	struct ngbe_adapter *adapter = NGBE_DEV_ADAPTER(dev);
> +	struct ngbe_rx_queue *rxq;
> +	uint32_t rxdctl;
> +	int poll_ms;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +
> +	ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
> +	wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
> +
> +	/* Wait until RX Enable bit clear */

RX -> Rx

> +	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
> +	do {
> +		rte_delay_ms(1);
> +		rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
> +	} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
> +	if (!poll_ms)

Compare vs 0

> +		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
> +
> +	rte_delay_us(RTE_NGBE_WAIT_100_US);
> +	ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
> +
> +	ngbe_rx_queue_release_mbufs(rxq);
> +	ngbe_reset_rx_queue(adapter, rxq);
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
> +/*
> + * Start Transmit Units for specified queue.
> + */
> +int __rte_cold
> +ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct ngbe_hw *hw = NGBE_DEV_HW(dev);
> +	struct ngbe_tx_queue *txq;
> +	uint32_t txdctl;
> +	int poll_ms;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
> +
> +	/* Wait until TX Enable ready */

TX -> Tx

> +	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
> +	do {
> +		rte_delay_ms(1);
> +		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
> +	} while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
> +	if (!poll_ms)

Compare vs 0

> +		PMD_INIT_LOG(ERR, "Could not enable "
> +			     "Tx Queue %d", tx_queue_id);
> +
> +	rte_wmb();
> +	wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
> +}
> +
> +/*
> + * Stop Transmit Units for specified queue.
> + */
> +int __rte_cold
> +ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct ngbe_hw *hw = NGBE_DEV_HW(dev);
> +	struct ngbe_tx_queue *txq;
> +	uint32_t txdctl;
> +	uint32_t txtdh, txtdt;
> +	int poll_ms;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +
> +	/* Wait until TX queue is empty */

TX -> Tx

> +	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
> +	do {
> +		rte_delay_us(RTE_NGBE_WAIT_100_US);
> +		txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
> +		txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
> +	} while (--poll_ms && (txtdh != txtdt));
> +	if (!poll_ms)

Compare vs 0

> +		PMD_INIT_LOG(ERR,
> +			"Tx Queue %d is not empty when stopping.",
> +			tx_queue_id);
> +
> +	ngbe_dev_save_tx_queue(hw, txq->reg_idx);
> +	wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
> +
> +	/* Wait until TX Enable bit clear */

TX -> Tx

> +	poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
> +	do {
> +		rte_delay_ms(1);
> +		txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
> +	} while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
> +	if (!poll_ms)

Compare vs 0

> +		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
> +			tx_queue_id);
> +
> +	rte_delay_us(RTE_NGBE_WAIT_100_US);
> +	ngbe_dev_store_tx_queue(hw, txq->reg_idx);
> +
> +	if (txq->ops != NULL) {
> +		txq->ops->release_mbufs(txq);
> +		txq->ops->reset(txq);
> +	}
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index 2cb98e2497..48241bd634 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -208,6 +208,9 @@ struct ngbe_tx_desc {
>   
>   #define rte_packet_prefetch(p)  rte_prefetch1(p)
>   
> +#define RTE_NGBE_REGISTER_POLL_WAIT_10_MS  10
> +#define RTE_NGBE_WAIT_100_US               100
> +
>   #define NGBE_TX_MAX_SEG                    40
>   
>   /**
> 

