patches for DPDK stable branches
 help / color / Atom feed
From: Kevin Traynor <ktraynor@redhat.com>
To: Xiao Zhang <xiao.zhang@intel.com>, dev@dpdk.org
Cc: wenzhuo.lu@intel.com, wei.zhao1@intel.com, xiaolong.ye@intel.com,
	stable@dpdk.org
Subject: Re: [dpdk-stable] [dpdk-dev] [v8] net/e1000: fix i219 hang on reset/close
Date: Wed, 4 Sep 2019 13:22:56 +0100
Message-ID: <aa94c624-123c-59b7-a1d4-6bd9fd22ffe9@redhat.com> (raw)
In-Reply-To: <1563808312-64145-1-git-send-email-xiao.zhang@intel.com>

On 22/07/2019 16:11, Xiao Zhang wrote:
> Unit hang may occur if multiple descriptors are available in the rings
> during reset or close. This state can be detected by checking bit 8 of
> the configuration status register. If the bit is set and there are pending
> descriptors in one of the rings, we must flush them before reset or
> close.
> 
> Fixes: 80580344 ("e1000: support EM devices (also known as e1000/e1000e)")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---
> v8 Modify to follow code style of dpdk community.
> v7 Add fix line.
> v6 Change the fix on em driver instead of igb driver and update the 
> register address according to C-Spec.
> v5 Change the subject.
> v4 Correct the tail descriptor of tx ring.
> v3 Add loop to handle all tx and rx queues.
> v2 Use configuration register instead of NVM7 to get the hang state.
> ---

Hi Wenzhuo, as e1000 maintainer can you review and ack this patch for
stable ?

>  drivers/net/e1000/e1000_ethdev.h |   4 ++
>  drivers/net/e1000/em_ethdev.c    |   5 ++
>  drivers/net/e1000/em_rxtx.c      | 111 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 120 insertions(+)
> 
> diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
> index 67acb73..01ff943 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -35,6 +35,9 @@
>  #define IGB_MAX_RX_QUEUE_NUM           8
>  #define IGB_MAX_RX_QUEUE_NUM_82576     16
>  
> +#define E1000_I219_MAX_RX_QUEUE_NUM		2
> +#define E1000_I219_MAX_TX_QUEUE_NUM		2
> +
>  #define E1000_SYN_FILTER_ENABLE        0x00000001 /* syn filter enable field */
>  #define E1000_SYN_FILTER_QUEUE         0x0000000E /* syn filter queue field */
>  #define E1000_SYN_FILTER_QUEUE_SHIFT   1          /* syn filter queue field */
> @@ -522,5 +525,6 @@ int igb_action_rss_same(const struct rte_flow_action_rss *comp,
>  int igb_config_rss_filter(struct rte_eth_dev *dev,
>  			struct igb_rte_flow_rss_conf *conf,
>  			bool add);
> +void em_flush_desc_rings(struct rte_eth_dev *dev);
>  
>  #endif /* _E1000_ETHDEV_H_ */
> diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
> index dc88661..62d3a95 100644
> --- a/drivers/net/e1000/em_ethdev.c
> +++ b/drivers/net/e1000/em_ethdev.c
> @@ -738,6 +738,11 @@ eth_em_stop(struct rte_eth_dev *dev)
>  	em_lsc_intr_disable(hw);
>  
>  	e1000_reset_hw(hw);
> +
> +	/* Flush desc rings for i219 */
> +	if (hw->mac.type >= e1000_pch_spt)

It means it is called for mac types below - is it right?

	e1000_pch_spt,
	e1000_pch_cnp,
	e1000_82575,
	e1000_82576,
	e1000_82580,
	e1000_i350,
	e1000_i354,
	e1000_i210,
	e1000_i211,
	e1000_vfadapt,
	e1000_vfadapt_i350,

> +		em_flush_desc_rings(dev);
> +
>  	if (hw->mac.type >= e1000_82544)
>  		E1000_WRITE_REG(hw, E1000_WUC, 0);
>  
> diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
> index 708f832..55d8a67 100644
> --- a/drivers/net/e1000/em_rxtx.c
> +++ b/drivers/net/e1000/em_rxtx.c
> @@ -18,6 +18,7 @@
>  #include <rte_log.h>
>  #include <rte_debug.h>
>  #include <rte_pci.h>
> +#include <rte_bus_pci.h>
>  #include <rte_memory.h>
>  #include <rte_memcpy.h>
>  #include <rte_memzone.h>
> @@ -59,6 +60,11 @@
>  #define E1000_TX_OFFLOAD_NOTSUP_MASK \
>  		(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
>  
> +/* PCI offset for querying configuration status register */
> +#define PCI_CFG_STATUS_REG                 0x06
> +#define FLUSH_DESC_REQUIRED               0x100
> +
> +
>  /**
>   * Structure associated with each descriptor of the RX ring of a RX queue.
>   */
> @@ -2000,3 +2006,108 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
>  	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
>  	qinfo->conf.offloads = txq->offloads;
>  }
> +
> +static void
> +e1000_flush_tx_ring(struct rte_eth_dev *dev)
> +{
> +	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	volatile struct e1000_data_desc *tx_desc;
> +	volatile uint32_t *tdt_reg_addr;
> +	uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
> +	uint16_t size = 512;
> +	struct em_tx_queue *txq;
> +	int i;
> +
> +	if (dev->data->tx_queues == NULL)
> +		return;
> +	tctl = E1000_READ_REG(hw, E1000_TCTL);
> +	E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
> +	for (i = 0; i < dev->data->nb_tx_queues &&
> +		i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
> +		txq = dev->data->tx_queues[i];
> +		tdt = E1000_READ_REG(hw, E1000_TDT(i));
> +		if (tdt != txq->tx_tail)
> +			return;
> +		tx_desc = &txq->tx_ring[txq->tx_tail];
> +		tx_desc->buffer_addr = rte_cpu_to_le_64(txq->tx_ring_phys_addr);
> +		tx_desc->lower.data = rte_cpu_to_le_32(txd_lower | size);
> +		tx_desc->upper.data = 0;
> +
> +		rte_wmb();
> +		txq->tx_tail++;
> +		if (txq->tx_tail == txq->nb_tx_desc)
> +			txq->tx_tail = 0;
> +		rte_io_wmb();
> +		tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(i));
> +		E1000_PCI_REG_WRITE_RELAXED(tdt_reg_addr, txq->tx_tail);
> +		usec_delay(250);
> +	}
> +}
> +
> +static void
> +e1000_flush_rx_ring(struct rte_eth_dev *dev)
> +{
> +	uint32_t rctl, rxdctl;
> +	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	int i;
> +
> +	rctl = E1000_READ_REG(hw, E1000_RCTL);
> +	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> +	E1000_WRITE_FLUSH(hw);
> +	usec_delay(150);
> +
> +	for (i = 0; i < dev->data->nb_rx_queues &&
> +		i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
> +		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
> +		/* zero the lower 14 bits (prefetch and host thresholds) */
> +		rxdctl &= 0xffffc000;
> +
> +		/* update thresholds: prefetch threshold to 31,
> +		 * host threshold to 1 and make sure the granularity
> +		 * is "descriptors" and not "cache lines"
> +		 */
> +		rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
> +
> +		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
> +	}
> +	/* momentarily enable the RX ring for the changes to take effect */
> +	E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
> +	E1000_WRITE_FLUSH(hw);
> +	usec_delay(150);
> +	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> +}
> +
> +/**
> + * em_flush_desc_rings - remove all descriptors from the descriptor rings
> + *
> + * In i219, the descriptor rings must be emptied before resetting/closing the
> + * HW. Failure to do this will cause the HW to enter a unit hang state which
> + * can only be released by PCI reset on the device
> + *
> + */
> +
> +void
> +em_flush_desc_rings(struct rte_eth_dev *dev)
> +{
> +	uint32_t fextnvm11, tdlen;
> +	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> +	uint16_t pci_cfg_status = 0;
> +
> +	fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
> +	E1000_WRITE_REG(hw, E1000_FEXTNVM11,
> +			fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
> +	tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
> +	rte_pci_read_config(pci_dev, &pci_cfg_status, sizeof(pci_cfg_status),
> +				PCI_CFG_STATUS_REG);
> +
> +	/* do nothing if we're not in faulty state, or if the queue is empty */
> +	if ((pci_cfg_status & FLUSH_DESC_REQUIRED) && tdlen) {
> +		/* flush desc ring */
> +		e1000_flush_tx_ring(dev);
> +		rte_pci_read_config(pci_dev, &pci_cfg_status,
> +				sizeof(pci_cfg_status), PCI_CFG_STATUS_REG);
> +		if (pci_cfg_status & FLUSH_DESC_REQUIRED)
> +			e1000_flush_rx_ring(dev);
> +	}
> +}
> 


  parent reply index

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1562838693-5160-1-git-send-email-xiao.zhang@intel.com>
2019-07-22 11:19 ` [dpdk-stable] [v6] " Xiao Zhang
2019-07-22  9:26   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
2019-07-22  3:18     ` Zhang, Xiao
2019-07-22 12:19   ` [dpdk-stable] [v7] " Xiao Zhang
2019-07-22 12:34     ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
2019-07-22 15:11     ` [dpdk-stable] [v8] " Xiao Zhang
2019-07-24  0:33       ` [dpdk-stable] [dpdk-dev] " Zhang, Qi Z
2019-09-04 12:22       ` Kevin Traynor [this message]
2019-09-05  1:33         ` Zhang, Xiao
2019-09-05  6:06       ` Gavin Hu (Arm Technology China)
2019-09-06  4:30         ` Zhang, Xiao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aa94c624-123c-59b7-a1d4-6bd9fd22ffe9@redhat.com \
    --to=ktraynor@redhat.com \
    --cc=dev@dpdk.org \
    --cc=stable@dpdk.org \
    --cc=wei.zhao1@intel.com \
    --cc=wenzhuo.lu@intel.com \
    --cc=xiao.zhang@intel.com \
    --cc=xiaolong.ye@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

patches for DPDK stable branches

Archives are clonable:
	git clone --mirror http://inbox.dpdk.org/stable/0 stable/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 stable stable/ http://inbox.dpdk.org/stable \
		stable@dpdk.org
	public-inbox-index stable


Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.stable


AGPL code for this site: git clone https://public-inbox.org/ public-inbox