* [dpdk-dev] [PATCH 1/3] net/ixgbe: remove redundant queue id checks
@ 2018-07-18 12:35 Ferruh Yigit
From: Ferruh Yigit @ 2018-07-18 12:35 UTC (permalink / raw)
To: Wenzhuo Lu, Konstantin Ananyev; +Cc: dev, Ferruh Yigit, Keith Wiles
Remove the queue id checks from the following dev_ops:
  ixgbe_dev_[rx/tx]_queue_[start/stop]

The queue id checks are already done by the ethdev APIs that call these
dev_ops, so the driver-level checks are redundant.
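
For reference, the ethdev-layer wrapper already rejects an out-of-range
queue id before any dev_op is reached, which is what makes the per-driver
checks removed below unreachable. A simplified sketch of the Rx start path
in lib/librte_ethdev/rte_ethdev.c (not the exact upstream code; the log and
validation macros vary between releases):

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	/* Validate the port id first. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];

	/* The queue id check the PMDs no longer need to repeat. */
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	/* Nothing to do if the queue is already started. */
	if (dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	/* Only now is the driver callback invoked, so it can assume
	 * rx_queue_id < dev->data->nb_rx_queues. */
	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

The Tx start and Rx/Tx stop wrappers follow the same pattern.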
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Cc: Keith Wiles <keith.wiles@intel.com>
---
drivers/net/ixgbe/ixgbe_rxtx.c | 139 +++++++++++++++------------------
1 file changed, 63 insertions(+), 76 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 354181664..f82b74a9a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -5176,34 +5176,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /* Allocate buffers for descriptor rings */
- if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
- rx_queue_id);
- return -1;
- }
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxq = dev->data->rx_queues[rx_queue_id];
- /* Wait until RX Enable ready */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
- rx_queue_id);
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5224,30 +5220,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
- /* Wait until RX Enable bit clear */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
- rx_queue_id);
-
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(adapter, rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -5267,30 +5259,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
- IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d", tx_queue_id);
- }
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- return -1;
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5310,9 +5299,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id >= dev->data->nb_tx_queues)
- return -1;
-
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
@@ -5326,8 +5312,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -5343,8 +5330,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
}
if (txq->ops != NULL) {
--
2.17.1
* [dpdk-dev] [PATCH 2/3] net/i40e: remove redundant queue id checks
From: Ferruh Yigit @ 2018-07-18 12:35 UTC (permalink / raw)
To: Wenzhuo Lu, Konstantin Ananyev, Beilei Xing, Qi Zhang
Cc: dev, Ferruh Yigit, Keith Wiles
Remove the queue id checks from the following dev_ops:
  i40e_dev_[rx/tx]_queue_[start/stop]
  i40evf_dev_[rx/tx]_queue_[start/stop]

The queue id checks are already done by the ethdev APIs that call these
dev_ops, so the driver-level checks are redundant.
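
As with the ixgbe patch, nothing changes for applications: an out-of-range
queue id is still rejected, only at the ethdev layer instead of inside the
PMD. A hypothetical caller-side illustration (the helper name and messages
are made up for this example):

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper showing the behaviour an application sees. */
static int
start_rx_queue_checked(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_rx_queue_start(port_id, queue_id);

	if (ret == -EINVAL)
		/* Rejected by the ethdev layer; the i40e/i40evf dev_op is
		 * never called for an invalid port or queue id. */
		printf("queue %u is out of range for port %u\n",
		       queue_id, port_id);
	else if (ret < 0)
		/* A genuine start failure reported by the PMD callback. */
		printf("failed to start Rx queue %u: %d\n", queue_id, ret);

	return ret;
}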
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Cc: Keith Wiles <keith.wiles@intel.com>
---
drivers/net/i40e/i40e_ethdev_vf.c | 107 +++++++++++--------------
drivers/net/i40e/i40e_rxtx.c | 127 ++++++++++++++----------------
2 files changed, 107 insertions(+), 127 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 3796c9161..001c301b9 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1583,37 +1583,35 @@ static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
-
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1622,45 +1620,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1669,22 +1661,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ txq = dev->data->tx_queues[tx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 65518ad90..59a6a8adb 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1531,38 +1531,36 @@ int
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = -1;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail regieter. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ rte_wmb();
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ /* Init the RX tail regieter. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- } else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1572,24 +1570,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- /*
- * rx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * rx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1597,28 +1592,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = -1;
+ int err;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1628,26 +1622,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * txq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * tx_queue_id is queue id application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
--
2.17.1
* [dpdk-dev] [PATCH 3/3] net/fm10k: remove redundant queue id checks
From: Ferruh Yigit @ 2018-07-18 12:35 UTC (permalink / raw)
To: Wenzhuo Lu, Konstantin Ananyev, Qi Zhang, Xiao Wang
Cc: dev, Ferruh Yigit, Keith Wiles
Remove the queue id checks from the following dev_ops:
  fm10k_dev_[rx/tx]_queue_[start/stop]

The queue id checks are already done by the ethdev APIs that call these
dev_ops, so the driver-level checks are redundant.
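
These callbacks are only reachable through the ethdev API because they are
registered in the driver's ops table; an abridged sketch of the relevant
part of fm10k_ethdev.c (only the queue start/stop hooks shown, all other
members omitted):

static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.rx_queue_start = fm10k_dev_rx_queue_start,
	.rx_queue_stop  = fm10k_dev_rx_queue_stop,
	.tx_queue_start = fm10k_dev_tx_queue_start,
	.tx_queue_stop  = fm10k_dev_tx_queue_stop,
	/* ... remaining dev_ops elided ... */
};

Since rte_eth_dev_[rx|tx]_queue_[start|stop]() validate the queue id before
dereferencing these function pointers, the in-driver range checks removed
below can never trigger.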
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Cc: Keith Wiles <keith.wiles@intel.com>
---
drivers/net/fm10k/fm10k_ethdev.c | 119 ++++++++++++++-----------------
1 file changed, 54 insertions(+), 65 deletions(-)
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7c505451a..541a49b75 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -810,52 +810,50 @@ static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err = -1;
+ int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
- err = rx_queue_reset(rxq);
- if (err == -ENOMEM) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
- return err;
- } else if (err == -EINVAL) {
- PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
- " %d", err);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled on real
- * hardware, but BEFORE the queue is enabled when using the
- * emulation platform. Do it in both places for now and remove
- * this comment and the following two register writes when the
- * emulation platform is no longer being used.
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- /* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
- if (hw->mac.type == fm10k_mac_pf)
- reg |= FM10K_RXQCTL_PF;
- reg |= FM10K_RXQCTL_ENABLE;
- /* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
- FM10K_WRITE_FLUSH(hw);
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- }
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -865,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- /* Disable RX queue */
- rx_queue_disable(hw, rx_queue_id);
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
- /* Free mbuf and clean HW ring */
- rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -884,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
- int err = 0;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
-
- q->ops->reset(q);
+ q->ops->reset(q);
- /* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
- /* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
- FM10K_TXDCTL_ENABLE | txdctl);
- FM10K_WRITE_FLUSH(hw);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- err = -1;
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -915,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_disable(hw, tx_queue_id);
- tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
--
2.17.1
* Re: [dpdk-dev] [PATCH 2/3] net/i40e: remove redundant queue id checks
From: Zhang, Qi Z @ 2018-07-19 12:21 UTC (permalink / raw)
To: Yigit, Ferruh, Lu, Wenzhuo, Ananyev, Konstantin, Xing, Beilei
Cc: dev, Wiles, Keith
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, July 18, 2018 8:35 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang,
> Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Wiles, Keith
> <keith.wiles@intel.com>
> Subject: [PATCH 2/3] net/i40e: remove redundant queue id checks
>
> remove queue id checks from dev_ops
> i40e_dev_[rx/tx]_queue_[start/stop]
> i40evf_dev_[rx/tx]_queue_[start/stop]
>
> queue id checks already done by ethdev APIs that are calling these dev_ops
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks!
Qi
* Re: [dpdk-dev] [PATCH 3/3] net/fm10k: remove redundant queue id checks
From: Zhang, Qi Z @ 2018-07-19 12:24 UTC (permalink / raw)
To: Yigit, Ferruh, Lu, Wenzhuo, Ananyev, Konstantin, Wang, Xiao W
Cc: dev, Wiles, Keith
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, July 18, 2018 8:35 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wang,
> Xiao W <xiao.w.wang@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Wiles, Keith
> <keith.wiles@intel.com>
> Subject: [PATCH 3/3] net/fm10k: remove redundant queue id checks
>
> remove queue id checks from dev_ops
> fm10k_dev_[rx/tx]_queue_[start/stop]
>
> queue id checks already done by ethdev APIs that are calling these dev_ops
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks!
Qi
* Re: [dpdk-dev] [PATCH 1/3] net/ixgbe: remove redundant queue id checks
From: Zhang, Qi Z @ 2018-07-19 12:33 UTC (permalink / raw)
To: Yigit, Ferruh, Lu, Wenzhuo, Ananyev, Konstantin
Cc: dev, Yigit, Ferruh, Wiles, Keith
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ferruh Yigit
> Sent: Wednesday, July 18, 2018 8:35 PM
> To: Lu, Wenzhuo <wenzhuo.lu@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Yigit, Ferruh <ferruh.yigit@intel.com>; Wiles, Keith
> <keith.wiles@intel.com>
> Subject: [dpdk-dev] [PATCH 1/3] net/ixgbe: remove redundant queue id checks
>
> remove queue id checks from dev_ops
> ixgbe_dev_[rx/tx]_queue_[start/stop]
>
> queue id checks already done by ethdev APIs that are calling these dev_ops
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks!
Qi