* Re: [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close
2019-07-09 12:23 [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close Xiao Zhang
@ 2019-07-09 3:38 ` Zhao1, Wei
2019-07-09 6:36 ` Anand H. Krishnan
1 sibling, 0 replies; 4+ messages in thread
From: Zhao1, Wei @ 2019-07-09 3:38 UTC (permalink / raw)
To: Zhang, Xiao, dev
Acked-by: Wei Zhao <wei.zhao1@intel.com>
> -----Original Message-----
> From: Zhang, Xiao
> Sent: Tuesday, July 9, 2019 8:23 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>; Zhang, Xiao <xiao.zhang@intel.com>
> Subject: [v3] net/e1000: i219 unit hang issue fix on reset/close
>
> Unit hang may occur if multiple descriptors are available in the rings during
> reset or close. This state can be detected by checking bit 8 (flush required)
> of the descriptor ring status register in PCI configuration space. If the bit
> is set and there are pending descriptors in one of the rings, we must flush
> them before reset or close.
>
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---
> drivers/net/e1000/e1000_ethdev.h | 4 ++
> drivers/net/e1000/igb_ethdev.c | 4 ++
> drivers/net/e1000/igb_rxtx.c | 105 +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 113 insertions(+)
>
> diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
> index 67acb73..349144a 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -35,6 +35,9 @@
> #define IGB_MAX_RX_QUEUE_NUM 8
> #define IGB_MAX_RX_QUEUE_NUM_82576 16
>
> +#define E1000_I219_MAX_RX_QUEUE_NUM 2
> +#define E1000_I219_MAX_TX_QUEUE_NUM 2
> +
> #define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
> #define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
> #define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
> @@ -522,5 +525,6 @@ int igb_action_rss_same(const struct rte_flow_action_rss *comp,
> int igb_config_rss_filter(struct rte_eth_dev *dev,
> struct igb_rte_flow_rss_conf *conf,
> bool add);
> +void igb_flush_desc_rings(struct rte_eth_dev *dev);
>
> #endif /* _E1000_ETHDEV_H_ */
> diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
> index 3ee28cf..845101b 100644
> --- a/drivers/net/e1000/igb_ethdev.c
> +++ b/drivers/net/e1000/igb_ethdev.c
> @@ -1589,6 +1589,10 @@ eth_igb_close(struct rte_eth_dev *dev)
> eth_igb_stop(dev);
> adapter->stopped = 1;
>
> + /* Flush desc rings for i219 */
> + if (hw->mac.type >= e1000_pch_spt)
> + igb_flush_desc_rings(dev);
> +
> e1000_phy_hw_reset(hw);
> igb_release_manageability(hw);
> igb_hw_control_release(hw);
> diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
> index c5606de..48e1c1e 100644
> --- a/drivers/net/e1000/igb_rxtx.c
> +++ b/drivers/net/e1000/igb_rxtx.c
> @@ -18,6 +18,7 @@
> #include <rte_log.h>
> #include <rte_debug.h>
> #include <rte_pci.h>
> +#include <rte_bus_pci.h>
> #include <rte_memory.h>
> #include <rte_memcpy.h>
> #include <rte_memzone.h>
> @@ -63,6 +64,10 @@
> #define IGB_TX_OFFLOAD_NOTSUP_MASK \
> (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
>
> +/* PCI offset for querying descriptor ring status*/
> +#define PCICFG_DESC_RING_STATUS 0xE4
> +#define FLUSH_DESC_REQUIRED 0x100
> +
> /**
> * Structure associated with each descriptor of the RX ring of a RX queue.
> */
> @@ -2962,3 +2967,103 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
>
> return 0;
> }
> +
> +static void e1000_flush_tx_ring(struct rte_eth_dev *dev)
> +{
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + volatile union e1000_adv_tx_desc *tx_desc;
> + uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
> + uint16_t size = 512;
> + struct igb_tx_queue *txq;
> + int i;
> +
> + if (dev->data->tx_queues == NULL)
> + return;
> + tctl = E1000_READ_REG(hw, E1000_TCTL);
> + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
> + for (i = 0; i < dev->data->nb_tx_queues &&
> + i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
> + txq = dev->data->tx_queues[i];
> + tdt = E1000_READ_REG(hw, E1000_TDT(i));
> + if (tdt != txq->tx_tail)
> + return;
> + tx_desc = txq->tx_ring;
> + tx_desc->read.buffer_addr = txq->tx_ring_phys_addr;
> + tx_desc->read.cmd_type_len = rte_cpu_to_le_32(txd_lower | size);
> + tx_desc->read.olinfo_status = 0;
> +
> + rte_wmb();
> + txq->tx_tail++;
> + if (txq->tx_tail == txq->nb_tx_desc)
> + txq->tx_tail = 0;
> + rte_io_wmb();
> + E1000_WRITE_REG(hw, E1000_TDT(i), txq->tx_tail);
> + usec_delay(250);
> + }
> +}
> +
> +static void e1000_flush_rx_ring(struct rte_eth_dev *dev)
> +{
> + uint32_t rctl, rxdctl;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + int i;
> +
> + rctl = E1000_READ_REG(hw, E1000_RCTL);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> +
> + for (i = 0; i < dev->data->nb_rx_queues &&
> + i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
> + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
> + /* zero the lower 14 bits (prefetch and host thresholds) */
> + rxdctl &= 0xffffc000;
> +
> + /* update thresholds: prefetch threshold to 31,
> + * host threshold to 1 and make sure the granularity
> + * is "descriptors" and not "cache lines"
> + */
> + rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
> +
> + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
> + }
> + /* momentarily enable the RX ring for the changes to take effect */
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> +}
> +
> +/**
> + * igb_flush_desc_rings - remove all descriptors from the descriptor rings
> + *
> + * In i219, the descriptor rings must be emptied before resetting/closing the
> + * HW. Failure to do this will cause the HW to enter a unit hang state which
> + * can only be released by PCI reset on the device
> + *
> + */
> +
> +void igb_flush_desc_rings(struct rte_eth_dev *dev)
> +{
> + uint32_t fextnvm11, tdlen;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> + uint32_t hang_state = 0;
> +
> + fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
> + E1000_WRITE_REG(hw, E1000_FEXTNVM11,
> + fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
> + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> +
> + /* do nothing if we're not in faulty state, or if the queue is empty */
> + if ((hang_state & FLUSH_DESC_REQUIRED) && tdlen) {
> + /* flush desc ring */
> + e1000_flush_tx_ring(dev);
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> + if (hang_state & FLUSH_DESC_REQUIRED)
> + e1000_flush_rx_ring(dev);
> + }
> +}
> --
> 2.7.4
* Re: [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close
2019-07-09 12:23 [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close Xiao Zhang
2019-07-09 3:38 ` Zhao1, Wei
@ 2019-07-09 6:36 ` Anand H. Krishnan
2019-07-09 15:48 ` Zhang, Xiao
1 sibling, 1 reply; 4+ messages in thread
From: Anand H. Krishnan @ 2019-07-09 6:36 UTC (permalink / raw)
To: Xiao Zhang; +Cc: dev, Zhao1, Wei
Comments inline.
On Tue, Jul 9, 2019 at 8:58 AM Xiao Zhang <xiao.zhang@intel.com> wrote:
>
> Unit hang may occur if multiple descriptors are available in the rings
> during reset or close. This state can be detected by checking bit 8
> (flush required) of the descriptor ring status register in PCI configuration
> space. If the bit is set and there are pending descriptors in one of the
> rings, we must flush them before reset or close.
>
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---
> drivers/net/e1000/e1000_ethdev.h | 4 ++
> drivers/net/e1000/igb_ethdev.c | 4 ++
> drivers/net/e1000/igb_rxtx.c | 105 +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 113 insertions(+)
>
> diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
> index 67acb73..349144a 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -35,6 +35,9 @@
> #define IGB_MAX_RX_QUEUE_NUM 8
> #define IGB_MAX_RX_QUEUE_NUM_82576 16
>
> +#define E1000_I219_MAX_RX_QUEUE_NUM 2
> +#define E1000_I219_MAX_TX_QUEUE_NUM 2
> +
> #define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
> #define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
> #define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
> @@ -522,5 +525,6 @@ int igb_action_rss_same(const struct rte_flow_action_rss *comp,
> int igb_config_rss_filter(struct rte_eth_dev *dev,
> struct igb_rte_flow_rss_conf *conf,
> bool add);
> +void igb_flush_desc_rings(struct rte_eth_dev *dev);
>
> #endif /* _E1000_ETHDEV_H_ */
> diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
> index 3ee28cf..845101b 100644
> --- a/drivers/net/e1000/igb_ethdev.c
> +++ b/drivers/net/e1000/igb_ethdev.c
> @@ -1589,6 +1589,10 @@ eth_igb_close(struct rte_eth_dev *dev)
> eth_igb_stop(dev);
> adapter->stopped = 1;
>
> + /* Flush desc rings for i219 */
> + if (hw->mac.type >= e1000_pch_spt)
> + igb_flush_desc_rings(dev);
> +
> e1000_phy_hw_reset(hw);
> igb_release_manageability(hw);
> igb_hw_control_release(hw);
> diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
> index c5606de..48e1c1e 100644
> --- a/drivers/net/e1000/igb_rxtx.c
> +++ b/drivers/net/e1000/igb_rxtx.c
> @@ -18,6 +18,7 @@
> #include <rte_log.h>
> #include <rte_debug.h>
> #include <rte_pci.h>
> +#include <rte_bus_pci.h>
> #include <rte_memory.h>
> #include <rte_memcpy.h>
> #include <rte_memzone.h>
> @@ -63,6 +64,10 @@
> #define IGB_TX_OFFLOAD_NOTSUP_MASK \
> (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
>
> +/* PCI offset for querying descriptor ring status*/
> +#define PCICFG_DESC_RING_STATUS 0xE4
> +#define FLUSH_DESC_REQUIRED 0x100
> +
> /**
> * Structure associated with each descriptor of the RX ring of a RX queue.
> */
> @@ -2962,3 +2967,103 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
>
> return 0;
> }
> +
> +static void e1000_flush_tx_ring(struct rte_eth_dev *dev)
> +{
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + volatile union e1000_adv_tx_desc *tx_desc;
> + uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
> + uint16_t size = 512;
> + struct igb_tx_queue *txq;
> + int i;
> +
> + if (dev->data->tx_queues == NULL)
> + return;
> + tctl = E1000_READ_REG(hw, E1000_TCTL);
> + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
> + for (i = 0; i < dev->data->nb_tx_queues &&
> + i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
> + txq = dev->data->tx_queues[i];
> + tdt = E1000_READ_REG(hw, E1000_TDT(i));
> + if (tdt != txq->tx_tail)
> + return;
> + tx_desc = txq->tx_ring;
This doesn't seem to be the descriptor at the tail. Are you sure this is
what the original patch does?
Thanks,
Anand
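For reference, writing the dummy descriptor at the entry TDT points to would
presumably look something like this sketch, which only reuses the queue fields
already present in the patch and is not the actual follow-up change:

    /* sketch: index the tail entry instead of the ring base */
    tx_desc = &txq->tx_ring[txq->tx_tail];
    tx_desc->read.buffer_addr = txq->tx_ring_phys_addr;
    tx_desc->read.cmd_type_len = rte_cpu_to_le_32(txd_lower | size);
    tx_desc->read.olinfo_status = 0;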
> + tx_desc->read.buffer_addr = txq->tx_ring_phys_addr;
> + tx_desc->read.cmd_type_len = rte_cpu_to_le_32(txd_lower | size);
> + tx_desc->read.olinfo_status = 0;
> +
> + rte_wmb();
> + txq->tx_tail++;
> + if (txq->tx_tail == txq->nb_tx_desc)
> + txq->tx_tail = 0;
> + rte_io_wmb();
> + E1000_WRITE_REG(hw, E1000_TDT(i), txq->tx_tail);
> + usec_delay(250);
> + }
> +}
> +
> +static void e1000_flush_rx_ring(struct rte_eth_dev *dev)
> +{
> + uint32_t rctl, rxdctl;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + int i;
> +
> + rctl = E1000_READ_REG(hw, E1000_RCTL);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> +
> + for (i = 0; i < dev->data->nb_rx_queues &&
> + i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
> + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
> + /* zero the lower 14 bits (prefetch and host thresholds) */
> + rxdctl &= 0xffffc000;
> +
> + /* update thresholds: prefetch threshold to 31,
> + * host threshold to 1 and make sure the granularity
> + * is "descriptors" and not "cache lines"
> + */
> + rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
> +
> + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
> + }
> + /* momentarily enable the RX ring for the changes to take effect */
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> +}
> +
> +/**
> + * igb_flush_desc_rings - remove all descriptors from the descriptor rings
> + *
> + * In i219, the descriptor rings must be emptied before resetting/closing the
> + * HW. Failure to do this will cause the HW to enter a unit hang state which
> + * can only be released by PCI reset on the device
> + *
> + */
> +
> +void igb_flush_desc_rings(struct rte_eth_dev *dev)
> +{
> + uint32_t fextnvm11, tdlen;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> + uint32_t hang_state = 0;
> +
> + fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
> + E1000_WRITE_REG(hw, E1000_FEXTNVM11,
> + fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
> + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> +
> + /* do nothing if we're not in faulty state, or if the queue is empty */
> + if ((hang_state & FLUSH_DESC_REQUIRED) && tdlen) {
> + /* flush desc ring */
> + e1000_flush_tx_ring(dev);
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> + if (hang_state & FLUSH_DESC_REQUIRED)
> + e1000_flush_rx_ring(dev);
> + }
> +}
> --
> 2.7.4
>
* [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close
@ 2019-07-09 12:23 Xiao Zhang
2019-07-09 3:38 ` Zhao1, Wei
2019-07-09 6:36 ` Anand H. Krishnan
0 siblings, 2 replies; 4+ messages in thread
From: Xiao Zhang @ 2019-07-09 12:23 UTC (permalink / raw)
To: dev; +Cc: wei.zhao1, Xiao Zhang
Unit hang may occur if multiple descriptors are available in the rings
during reset or close. This state can be detected by checking bit 8
(flush required) of the descriptor ring status register in PCI configuration
space. If the bit is set and there are pending descriptors in one of the
rings, we must flush them before reset or close.
Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
drivers/net/e1000/e1000_ethdev.h | 4 ++
drivers/net/e1000/igb_ethdev.c | 4 ++
drivers/net/e1000/igb_rxtx.c | 105 +++++++++++++++++++++++++++++++++++++++
3 files changed, 113 insertions(+)
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 67acb73..349144a 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -35,6 +35,9 @@
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16
+#define E1000_I219_MAX_RX_QUEUE_NUM 2
+#define E1000_I219_MAX_TX_QUEUE_NUM 2
+
#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
@@ -522,5 +525,6 @@ int igb_action_rss_same(const struct rte_flow_action_rss *comp,
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);
+void igb_flush_desc_rings(struct rte_eth_dev *dev);
#endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3ee28cf..845101b 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -1589,6 +1589,10 @@ eth_igb_close(struct rte_eth_dev *dev)
eth_igb_stop(dev);
adapter->stopped = 1;
+ /* Flush desc rings for i219 */
+ if (hw->mac.type >= e1000_pch_spt)
+ igb_flush_desc_rings(dev);
+
e1000_phy_hw_reset(hw);
igb_release_manageability(hw);
igb_hw_control_release(hw);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index c5606de..48e1c1e 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -18,6 +18,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
@@ -63,6 +64,10 @@
#define IGB_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+/* PCI offset for querying descriptor ring status*/
+#define PCICFG_DESC_RING_STATUS 0xE4
+#define FLUSH_DESC_REQUIRED 0x100
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -2962,3 +2967,103 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
return 0;
}
+
+static void e1000_flush_tx_ring(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ volatile union e1000_adv_tx_desc *tx_desc;
+ uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ uint16_t size = 512;
+ struct igb_tx_queue *txq;
+ int i;
+
+ if (dev->data->tx_queues == NULL)
+ return;
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
+ for (i = 0; i < dev->data->nb_tx_queues &&
+ i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
+ txq = dev->data->tx_queues[i];
+ tdt = E1000_READ_REG(hw, E1000_TDT(i));
+ if (tdt != txq->tx_tail)
+ return;
+ tx_desc = txq->tx_ring;
+ tx_desc->read.buffer_addr = txq->tx_ring_phys_addr;
+ tx_desc->read.cmd_type_len = rte_cpu_to_le_32(txd_lower | size);
+ tx_desc->read.olinfo_status = 0;
+
+ rte_wmb();
+ txq->tx_tail++;
+ if (txq->tx_tail == txq->nb_tx_desc)
+ txq->tx_tail = 0;
+ rte_io_wmb();
+ E1000_WRITE_REG(hw, E1000_TDT(i), txq->tx_tail);
+ usec_delay(250);
+ }
+}
+
+static void e1000_flush_rx_ring(struct rte_eth_dev *dev)
+{
+ uint32_t rctl, rxdctl;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(150);
+
+ for (i = 0; i < dev->data->nb_rx_queues &&
+ i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
+ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ /* zero the lower 14 bits (prefetch and host thresholds) */
+ rxdctl &= 0xffffc000;
+
+ /* update thresholds: prefetch threshold to 31,
+ * host threshold to 1 and make sure the granularity
+ * is "descriptors" and not "cache lines"
+ */
+ rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+ }
+ /* momentarily enable the RX ring for the changes to take effect */
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(150);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+}
+
+/**
+ * igb_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * In i219, the descriptor rings must be emptied before resetting/closing the
+ * HW. Failure to do this will cause the HW to enter a unit hang state which
+ * can only be released by PCI reset on the device
+ *
+ */
+
+void igb_flush_desc_rings(struct rte_eth_dev *dev)
+{
+ uint32_t fextnvm11, tdlen;
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint32_t hang_state = 0;
+
+ fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
+ E1000_WRITE_REG(hw, E1000_FEXTNVM11,
+ fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
+ tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
+ rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
+ PCICFG_DESC_RING_STATUS);
+
+ /* do nothing if we're not in faulty state, or if the queue is empty */
+ if ((hang_state & FLUSH_DESC_REQUIRED) && tdlen) {
+ /* flush desc ring */
+ e1000_flush_tx_ring(dev);
+ rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
+ PCICFG_DESC_RING_STATUS);
+ if (hang_state & FLUSH_DESC_REQUIRED)
+ e1000_flush_rx_ring(dev);
+ }
+}
--
2.7.4
* Re: [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close
2019-07-09 6:36 ` Anand H. Krishnan
@ 2019-07-09 15:48 ` Zhang, Xiao
0 siblings, 0 replies; 4+ messages in thread
From: Zhang, Xiao @ 2019-07-09 15:48 UTC (permalink / raw)
To: Anand H. Krishnan; +Cc: dev, Zhao1, Wei
The tail index was missing in this patch; a new patch has been sent out.
-----Original Message-----
From: Anand H. Krishnan [mailto:anandhkrishnan@gmail.com]
Sent: Tuesday, July 9, 2019 2:37 PM
To: Zhang, Xiao <xiao.zhang@intel.com>
Cc: dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
Subject: Re: [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close
Comments inline.
On Tue, Jul 9, 2019 at 8:58 AM Xiao Zhang <xiao.zhang@intel.com> wrote:
>
> Unit hang may occur if multiple descriptors are available in the rings
> during reset or close. This state can be detected by checking bit 8
> (flush required) of the descriptor ring status register in PCI configuration
> space. If the bit is set and there are pending descriptors in one of the
> rings, we must flush them before reset or close.
>
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---
> drivers/net/e1000/e1000_ethdev.h | 4 ++
> drivers/net/e1000/igb_ethdev.c | 4 ++
> drivers/net/e1000/igb_rxtx.c | 105 +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 113 insertions(+)
>
> diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
> index 67acb73..349144a 100644
> --- a/drivers/net/e1000/e1000_ethdev.h
> +++ b/drivers/net/e1000/e1000_ethdev.h
> @@ -35,6 +35,9 @@
> #define IGB_MAX_RX_QUEUE_NUM 8
> #define IGB_MAX_RX_QUEUE_NUM_82576 16
>
> +#define E1000_I219_MAX_RX_QUEUE_NUM 2
> +#define E1000_I219_MAX_TX_QUEUE_NUM 2
> +
> #define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
> #define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
> #define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
> @@ -522,5 +525,6 @@ int igb_action_rss_same(const struct rte_flow_action_rss *comp,
> int igb_config_rss_filter(struct rte_eth_dev *dev,
> struct igb_rte_flow_rss_conf *conf,
> bool add);
> +void igb_flush_desc_rings(struct rte_eth_dev *dev);
>
> #endif /* _E1000_ETHDEV_H_ */
> diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
> index 3ee28cf..845101b 100644
> --- a/drivers/net/e1000/igb_ethdev.c
> +++ b/drivers/net/e1000/igb_ethdev.c
> @@ -1589,6 +1589,10 @@ eth_igb_close(struct rte_eth_dev *dev)
> eth_igb_stop(dev);
> adapter->stopped = 1;
>
> + /* Flush desc rings for i219 */
> + if (hw->mac.type >= e1000_pch_spt)
> + igb_flush_desc_rings(dev);
> +
> e1000_phy_hw_reset(hw);
> igb_release_manageability(hw);
> igb_hw_control_release(hw);
> diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
> index c5606de..48e1c1e 100644
> --- a/drivers/net/e1000/igb_rxtx.c
> +++ b/drivers/net/e1000/igb_rxtx.c
> @@ -18,6 +18,7 @@
> #include <rte_log.h>
> #include <rte_debug.h>
> #include <rte_pci.h>
> +#include <rte_bus_pci.h>
> #include <rte_memory.h>
> #include <rte_memcpy.h>
> #include <rte_memzone.h>
> @@ -63,6 +64,10 @@
> #define IGB_TX_OFFLOAD_NOTSUP_MASK \
> (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
>
> +/* PCI offset for querying descriptor ring status*/
> +#define PCICFG_DESC_RING_STATUS 0xE4
> +#define FLUSH_DESC_REQUIRED 0x100
> +
> /**
> * Structure associated with each descriptor of the RX ring of a RX queue.
> */
> @@ -2962,3 +2967,103 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
>
> return 0;
> }
> +
> +static void e1000_flush_tx_ring(struct rte_eth_dev *dev)
> +{
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + volatile union e1000_adv_tx_desc *tx_desc;
> + uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
> + uint16_t size = 512;
> + struct igb_tx_queue *txq;
> + int i;
> +
> + if (dev->data->tx_queues == NULL)
> + return;
> + tctl = E1000_READ_REG(hw, E1000_TCTL);
> + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
> + for (i = 0; i < dev->data->nb_tx_queues &&
> + i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
> + txq = dev->data->tx_queues[i];
> + tdt = E1000_READ_REG(hw, E1000_TDT(i));
> + if (tdt != txq->tx_tail)
> + return;
> + tx_desc = txq->tx_ring;
This doesn't seem to be the descriptor at the tail. Are you sure this is what the original patch does?
Thanks,
Anand
> + tx_desc->read.buffer_addr = txq->tx_ring_phys_addr;
> + tx_desc->read.cmd_type_len = rte_cpu_to_le_32(txd_lower | size);
> + tx_desc->read.olinfo_status = 0;
> +
> + rte_wmb();
> + txq->tx_tail++;
> + if (txq->tx_tail == txq->nb_tx_desc)
> + txq->tx_tail = 0;
> + rte_io_wmb();
> + E1000_WRITE_REG(hw, E1000_TDT(i), txq->tx_tail);
> + usec_delay(250);
> + }
> +}
> +
> +static void e1000_flush_rx_ring(struct rte_eth_dev *dev)
> +{
> + uint32_t rctl, rxdctl;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + int i;
> +
> + rctl = E1000_READ_REG(hw, E1000_RCTL);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> +
> + for (i = 0; i < dev->data->nb_rx_queues &&
> + i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
> + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
> + /* zero the lower 14 bits (prefetch and host thresholds) */
> + rxdctl &= 0xffffc000;
> +
> + /* update thresholds: prefetch threshold to 31,
> + * host threshold to 1 and make sure the granularity
> + * is "descriptors" and not "cache lines"
> + */
> + rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
> +
> + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
> + }
> + /* momentarily enable the RX ring for the changes to take effect */
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
> + E1000_WRITE_FLUSH(hw);
> + usec_delay(150);
> + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
> +}
> +
> +/**
> + * igb_flush_desc_rings - remove all descriptors from the descriptor
> +rings
> + *
> + * In i219, the descriptor rings must be emptied before
> +resetting/closing the
> + * HW. Failure to do this will cause the HW to enter a unit hang
> +state which
> + * can only be released by PCI reset on the device
> + *
> + */
> +
> +void igb_flush_desc_rings(struct rte_eth_dev *dev)
> +{
> + uint32_t fextnvm11, tdlen;
> + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> + uint32_t hang_state = 0;
> +
> + fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
> + E1000_WRITE_REG(hw, E1000_FEXTNVM11,
> + fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
> + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> +
> + /* do nothing if we're not in faulty state, or if the queue is empty */
> + if ((hang_state & FLUSH_DESC_REQUIRED) && tdlen) {
> + /* flush desc ring */
> + e1000_flush_tx_ring(dev);
> + rte_pci_read_config(pci_dev, &hang_state, sizeof(hang_state),
> + PCICFG_DESC_RING_STATUS);
> + if (hang_state & FLUSH_DESC_REQUIRED)
> + e1000_flush_rx_ring(dev);
> + }
> +}
> --
> 2.7.4
>
End of thread.
Thread overview: 4+ messages
2019-07-09 12:23 [dpdk-dev] [v3] net/e1000: i219 unit hang issue fix on reset/close Xiao Zhang
2019-07-09 3:38 ` Zhao1, Wei
2019-07-09 6:36 ` Anand H. Krishnan
2019-07-09 15:48 ` Zhang, Xiao