DPDK patches and discussions
* [dpdk-dev] [PATCH v2] ethdev: add new RX/TX queue state arrays in rte_eth_dev_data
@ 2015-09-16 21:51 Pablo de Lara
  2015-09-21 22:40 ` Stephen Hemminger
  2015-09-23 11:49 ` Ananyev, Konstantin
  0 siblings, 2 replies; 7+ messages in thread
From: Pablo de Lara @ 2015-09-16 21:51 UTC (permalink / raw)
  To: dev

Following the same approach taken with the dev_started field
in the rte_eth_dev_data structure, this patch adds two new fields
to it, the rx_queue_state and tx_queue_state arrays, which track
which queues have been started and which have not.

This is important to avoid starting or stopping a queue twice,
which results in undefined behaviour
(and may cause RX/TX disruption).

Note that only the PMDs which have queue_start/stop functions
have been changed to update these fields, as the generic ethdev
functions now check the queue state before switching it.
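
For illustration only (this snippet is not part of the patch), a
minimal sketch of what the check buys a caller; the helper name and
the port/queue ids are hypothetical and error handling is trimmed:

#include <rte_ethdev.h>

/* Hypothetical helper: cycle an RX queue off and back on. */
static int
restart_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	int ret;

	/*
	 * With the new rx_queue_state array, a redundant start is
	 * caught inside rte_eth_dev_rx_queue_start(): it logs a debug
	 * trace and returns 0 without calling into the PMD, instead
	 * of hitting undefined behaviour in the driver.
	 */
	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret < 0)
		return ret;

	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}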

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
Changes in v2:
- Added macros to define the different queue states

 drivers/net/cxgbe/cxgbe_ethdev.c  | 29 +++++++++++++++++++++++++----
 drivers/net/enic/enic_ethdev.c    |  6 ++++++
 drivers/net/fm10k/fm10k_ethdev.c  |  4 ++++
 drivers/net/i40e/i40e_ethdev_vf.c |  6 ++++++
 drivers/net/i40e/i40e_rxtx.c      |  7 ++++++-
 drivers/net/ixgbe/ixgbe_rxtx.c    |  4 ++++
 lib/librte_ether/rte_ethdev.c     | 28 ++++++++++++++++++++++++++++
 lib/librte_ether/rte_ethdev.h     | 10 ++++++++++
 8 files changed, 89 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 478051a..305959e 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -368,23 +368,33 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
 				    uint16_t tx_queue_id)
 {
+	int ret;
 	struct sge_eth_txq *txq = (struct sge_eth_txq *)
 				  (eth_dev->data->tx_queues[tx_queue_id]);
 
 	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
 
-	return t4_sge_eth_txq_start(txq);
+	ret = t4_sge_eth_txq_start(txq);
+	if (ret == 0)
+		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return ret;
 }
 
 static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
 				   uint16_t tx_queue_id)
 {
+	int ret;
 	struct sge_eth_txq *txq = (struct sge_eth_txq *)
 				  (eth_dev->data->tx_queues[tx_queue_id]);
 
 	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
 
-	return t4_sge_eth_txq_stop(txq);
+	ret = t4_sge_eth_txq_stop(txq);
+	if (ret == 0)
+		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return ret;
 }
 
 static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
@@ -460,6 +470,7 @@ static void cxgbe_dev_tx_queue_release(void *q)
 static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
 				    uint16_t rx_queue_id)
 {
+	int ret;
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adap = pi->adapter;
 	struct sge_rspq *q;
@@ -468,12 +479,18 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
 		  __func__, pi->port_id, rx_queue_id);
 
 	q = eth_dev->data->rx_queues[rx_queue_id];
-	return t4_sge_eth_rxq_start(adap, q);
+
+	ret = t4_sge_eth_rxq_start(adap, q);
+	if (ret == 0)
+		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return ret;
 }
 
 static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
 				   uint16_t rx_queue_id)
 {
+	int ret;
 	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
 	struct adapter *adap = pi->adapter;
 	struct sge_rspq *q;
@@ -482,7 +499,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
 		  __func__, pi->port_id, rx_queue_id);
 
 	q = eth_dev->data->rx_queues[rx_queue_id];
-	return t4_sge_eth_rxq_stop(adap, q);
+	ret = t4_sge_eth_rxq_stop(adap, q);
+	if (ret == 0)
+		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return ret;
 }
 
 static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 8280cea..56b22f1 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -194,6 +194,7 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
 	ENICPMD_FUNC_TRACE();
 
 	enic_start_wq(enic, queue_idx);
+	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
 }
@@ -209,6 +210,8 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
 	ret = enic_stop_wq(enic, queue_idx);
 	if (ret)
 		dev_err(enic, "error in stopping wq %d\n", queue_idx);
+	else
+		eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return ret;
 }
@@ -221,6 +224,7 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
 	ENICPMD_FUNC_TRACE();
 
 	enic_start_rq(enic, queue_idx);
+	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
 }
@@ -236,6 +240,8 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
 	ret = enic_stop_rq(enic, queue_idx);
 	if (ret)
 		dev_err(enic, "error in stopping rq %d\n", queue_idx);
+	else
+		eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return ret;
 }
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a69c990..b7e7b5a 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -524,6 +524,7 @@ fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		 */
 		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
 		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return err;
@@ -542,6 +543,7 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		/* Free mbuf and clean HW ring */
 		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
@@ -569,6 +571,7 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
 					FM10K_TXDCTL_ENABLE | txdctl);
 		FM10K_WRITE_FLUSH(hw);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	} else
 		err = -1;
 
@@ -585,6 +588,7 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	if (tx_queue_id < dev->data->nb_tx_queues) {
 		tx_queue_disable(hw, tx_queue_id);
 		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..0f07a77 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1373,6 +1373,8 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
 				    rx_queue_id);
+		else
+			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return err;
@@ -1397,6 +1399,7 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		i40e_rx_queue_release_mbufs(rxq);
 		i40e_reset_rx_queue(rxq);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
@@ -1417,6 +1420,8 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 				    tx_queue_id);
+		else
+			dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return err;
@@ -1441,6 +1446,7 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		i40e_tx_queue_release_mbufs(txq);
 		i40e_reset_tx_queue(txq);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..dc297b1 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2009,7 +2009,8 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 			i40e_rx_queue_release_mbufs(rxq);
 			i40e_reset_rx_queue(rxq);
-		}
+		} else
+			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return err;
@@ -2038,6 +2039,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		}
 		i40e_rx_queue_release_mbufs(rxq);
 		i40e_reset_rx_queue(rxq);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
@@ -2063,6 +2065,8 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 				    tx_queue_id);
+		else
+			dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	}
 
 	return err;
@@ -2092,6 +2096,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 		i40e_tx_queue_release_mbufs(txq);
 		i40e_reset_tx_queue(txq);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..5011a6a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4498,6 +4498,7 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	} else
 		return -1;
 
@@ -4541,6 +4542,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		ixgbe_rx_queue_release_mbufs(rxq);
 		ixgbe_reset_rx_queue(adapter, rxq);
+		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	} else
 		return -1;
 
@@ -4583,6 +4585,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 	} else
 		return -1;
 
@@ -4643,6 +4646,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 			txq->ops->release_mbufs(txq);
 			txq->ops->reset(txq);
 		}
+		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 	} else
 		return -1;
 
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index b309309..5af35c9 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -767,6 +767,13 @@ rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
 
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
 
+	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
+		PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+			" already started\n",
+			rx_queue_id, port_id);
+		return 0;
+	}
+
 	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
 
 }
@@ -790,6 +797,13 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
 
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
 
+	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
+		PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+			" already stopped\n",
+			rx_queue_id, port_id);
+		return 0;
+	}
+
 	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
 
 }
@@ -813,6 +827,13 @@ rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
 
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
 
+	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
+		PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+			" already started\n",
+			tx_queue_id, port_id);
+		return 0;
+	}
+
 	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
 
 }
@@ -836,6 +857,13 @@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
 
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
 
+	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
+		PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
+			" already stopped\n",
+			tx_queue_id, port_id);
+		return 0;
+	}
+
 	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
 
 }
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index fa06554..120808b 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -962,6 +962,12 @@ struct rte_eth_xstats {
 	uint64_t value;
 };
 
+/**
+ * RX/TX queue states
+ */
+#define RTE_ETH_QUEUE_STATE_STOPPED 0
+#define RTE_ETH_QUEUE_STATE_STARTED 1
+
 struct rte_eth_dev;
 
 struct rte_eth_dev_callback;
@@ -1635,6 +1641,10 @@ struct rte_eth_dev_data {
 		all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
 		dev_started : 1,   /**< Device state: STARTED(1) / STOPPED(0). */
 		lro         : 1;   /**< RX LRO is ON(1) / OFF(0) */
+	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	/** Queues state: STARTED(1) / STOPPED(0) */
+	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	/** Queues state: STARTED(1) / STOPPED(0) */
 };
 
 /**
-- 
2.4.2
