DPDK patches and discussions
* [dpdk-dev] [PATCH v2] mlx5: Support for rte_eth_rx_queue_count
@ 2018-10-26 14:48 Tom Barbette
  2018-10-26 21:47 ` Yongseok Koh
  0 siblings, 1 reply; 2+ messages in thread
From: Tom Barbette @ 2018-10-26 14:48 UTC (permalink / raw)
  To: dev; +Cc: shahafs, Tom Barbette

This patch adds support for the rx_queue_count API in the mlx5 driver.
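
For reference, a minimal sketch (not part of the diff below) of how an application
could read this counter through the generic ethdev wrapper once the PMD provides
the callback; the helper name and the port/queue IDs are illustrative:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print how full an Rx queue currently is. Port and queue IDs are
 * whatever the application configured earlier. */
static void
report_rxq_fill(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used < 0) {
		/* -ENOTSUP if the PMD has no rx_queue_count callback,
		 * -EINVAL for an invalid port or queue id. */
		printf("port %u rxq %u: count unavailable (%d)\n",
		       port_id, queue_id, used);
		return;
	}
	printf("port %u rxq %u: %d descriptors in use\n",
	       port_id, queue_id, used);
}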

Changes in v2:
  * Fixed styling issues
  * Fixed missing return

Signed-off-by: Tom Barbette <barbette@kth.se>
---
 drivers/net/mlx5/mlx5.c      |  1 +
 drivers/net/mlx5/mlx5_rxtx.c | 60 +++++++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 3 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ec63bc6..6fccadd 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -375,6 +375,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
 	.filter_ctrl = mlx5_dev_filter_ctrl,
 	.rx_descriptor_status = mlx5_rx_descriptor_status,
 	.tx_descriptor_status = mlx5_tx_descriptor_status,
+	.rx_queue_count = mlx5_rx_queue_count,
 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
 	.is_removed = mlx5_is_removed,
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2d14f8a..cafb084 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -417,20 +417,17 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 /**
- * DPDK callback to check the status of a rx descriptor.
+ * Internal function to compute the number of used descriptors in an RX queue
  *
- * @param rx_queue
- *   The rx queue.
- * @param[in] offset
- *   The index of the descriptor in the ring.
+ * @param tx_queue
+ *   The tx queue.
  *
  * @return
- *   The status of the tx descriptor.
+ *   The number of used rx descriptor.
  */
-int
-mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+static uint32_t
+rx_queue_count(struct mlx5_rxq_data *rxq)
 {
-	struct mlx5_rxq_data *rxq = rx_queue;
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -461,12 +458,57 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	}
 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+	return used;
+}
+
+/**
+ * DPDK callback to check the status of a rx descriptor.
+ *
+ * @param rx_queue
+ *   The rx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the tx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct mlx5_rxq_data *rxq = rx_queue;
+
+	uint32_t used = rx_queue_count(rxq);
 	if (offset < used)
 		return RTE_ETH_RX_DESC_DONE;
 	return RTE_ETH_RX_DESC_AVAIL;
 }
 
 /**
+ * DPDK callback to get the number of used descriptors in a RX queue
+ *
+ * @param tx_queue
+ *   The tx queue.
+ *
+ * @return
+ *   The number of used rx descriptor.
+ *   -EINVAL if the queue is invalid
+ */
+uint32_t
+mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq;
+
+	rxq = (*priv->rxqs)[rx_queue_id];
+	if (!rxq) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+
+	return rx_queue_count(rxq);
+}
+
+/**
  * DPDK callback for TX.
  *
  * @param dpdk_txq
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 48ed2b2..c82059b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -345,6 +345,7 @@ uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
 			  uint16_t pkts_n);
 int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
 /* Vectorized version of mlx5_rxtx.c */
 int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
-- 
2.7.4


* Re: [dpdk-dev] [PATCH v2] mlx5: Support for rte_eth_rx_queue_count
  2018-10-26 14:48 [dpdk-dev] [PATCH v2] mlx5: Support for rte_eth_rx_queue_count Tom Barbette
@ 2018-10-26 21:47 ` Yongseok Koh
  0 siblings, 0 replies; 2+ messages in thread
From: Yongseok Koh @ 2018-10-26 21:47 UTC (permalink / raw)
  To: Tom Barbette; +Cc: dev, Shahaf Shuler

On Fri, Oct 26, 2018 at 04:48:29PM +0200, Tom Barbette wrote:
> This patch adds support for the rx_queue_count API in the mlx5 driver.
> 
> Changes in v2:
>   * Fixed styling issues
>   * Fixed missing return
> 
> Signed-off-by: Tom Barbette <barbette@kth.se>
> ---

Thank you for your contribution!
It is good, but I have a few small comments, mostly cosmetic ones.

>  drivers/net/mlx5/mlx5.c      |  1 +
>  drivers/net/mlx5/mlx5_rxtx.c | 60 +++++++++++++++++++++++++++++++++++++-------
>  drivers/net/mlx5/mlx5_rxtx.h |  1 +
>  3 files changed, 53 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index ec63bc6..6fccadd 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -375,6 +375,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
>  	.filter_ctrl = mlx5_dev_filter_ctrl,
>  	.rx_descriptor_status = mlx5_rx_descriptor_status,
>  	.tx_descriptor_status = mlx5_tx_descriptor_status,
> +	.rx_queue_count = mlx5_rx_queue_count,
>  	.rx_queue_intr_enable = mlx5_rx_intr_enable,
>  	.rx_queue_intr_disable = mlx5_rx_intr_disable,
>  	.is_removed = mlx5_is_removed,
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
> index 2d14f8a..cafb084 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -417,20 +417,17 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
>  }
>  
>  /**
> - * DPDK callback to check the status of a rx descriptor.
> + * Internal function to compute the number of used descriptors in an RX queue
>   *
> - * @param rx_queue
> - *   The rx queue.

I know you just copied it, but I'd like to take this opportunity to fix it. :-)
Please change 'rx' to 'Rx'. Same for 'tx'.

> - * @param[in] offset
> - *   The index of the descriptor in the ring.
> + * @param tx_queue
> + *   The tx queue.

'tx' -> 'Rx'

>   *
>   * @return
> - *   The status of the tx descriptor.
> + *   The number of used rx descriptor.
>   */
> -int
> -mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
> +static uint32_t
> +rx_queue_count(struct mlx5_rxq_data *rxq)
>  {
> -	struct mlx5_rxq_data *rxq = rx_queue;
>  	struct rxq_zip *zip = &rxq->zip;
>  	volatile struct mlx5_cqe *cqe;
>  	const unsigned int cqe_n = (1 << rxq->cqe_n);
> @@ -461,12 +458,57 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
>  		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
>  	}
>  	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
> +	return used;
> +}
> +
> +/**
> + * DPDK callback to check the status of a rx descriptor.
> + *
> + * @param rx_queue
> + *   The rx queue.
> + * @param[in] offset
> + *   The index of the descriptor in the ring.
> + *
> + * @return
> + *   The status of the tx descriptor.

'tx' -> 'Rx'

> + */
> +int
> +mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
> +{
> +	struct mlx5_rxq_data *rxq = rx_queue;
> +

Please remove this blank line. We only allow a blank line between variable
declarations and the body.

> +	uint32_t used = rx_queue_count(rxq);

To be honest, there's a known issue that should've been fixed before. It is only
valid for the regular Rx burst. In mlx5, there are three types of rx_burst -
mlx5_rx_burst(), mlx5_rx_burst_mprq() and mlx5_rx_burst_vec(). You can refer to
mlx5_select_rx_function(). Among them, this works only with mlx5_rx_burst().

So, if you don't mind, can you please add two sanity checks here?

{
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);

	if (dev->rx_pkt_burst != mlx5_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (offset >= (1 << rxq->elts_n)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}

>  
>  /**
> + * DPDK callback to get the number of used descriptors in a RX queue
> + *
> + * @param tx_queue
> + *   The tx queue.

Parameters are dev and rx_queue_id.

> + *
> + * @return
> + *   The number of used rx descriptor.
> + *   -EINVAL if the queue is invalid
> + */
> +uint32_t
> +mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct priv *priv = dev->data->dev_private;
> +	struct mlx5_rxq_data *rxq;
> +
> +	rxq = (*priv->rxqs)[rx_queue_id];
> +	if (!rxq) {
> +		rte_errno = EINVAL;
> +		return -rte_errno;
> +	}

And please do the same check here.

	if (dev->rx_pkt_burst != mlx5_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

> +

Please remove this blank line.


Thanks,
Yongseok

> +	return rx_queue_count(rxq);
> +}
> +
> +/**
>   * DPDK callback for TX.
>   *
>   * @param dpdk_txq
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index 48ed2b2..c82059b 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -345,6 +345,7 @@ uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
>  			  uint16_t pkts_n);
>  int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
>  int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
> +uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
>  
>  /* Vectorized version of mlx5_rxtx.c */
>  int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
> -- 
> 2.7.4
> 
