From: Tom Barbette <barbette@kth.se>
To: dev@dpdk.org
Cc: shahafs@mellanox.com, yskoh@mellanox.com, Tom Barbette <barbette@kth.se>
Subject: [dpdk-dev] [PATCH v4] mlx5: Support for rte_eth_rx_queue_count
Date: Sat, 27 Oct 2018 17:10:55 +0200	[thread overview]
Message-ID: <1540653055-67051-1-git-send-email-barbette@kth.se> (raw)

This patch adds support for the rx_queue_count API in the mlx5 driver.

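For context (not part of the patch), a minimal application-side sketch of how
the new support can be exercised through the generic ethdev call; the helper
name, the nb_rxd parameter and the output format are illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Report how full an Rx queue is. Assumes the port runs the regular
     * (non-vectorized) mlx5_rx_burst path, since the PMD returns -ENOTSUP
     * otherwise. nb_rxd is the ring size passed to rte_eth_rx_queue_setup().
     */
    static void
    report_rx_queue_fill(uint16_t port_id, uint16_t queue_id, uint16_t nb_rxd)
    {
            int used = rte_eth_rx_queue_count(port_id, queue_id);

            if (used < 0) {
                    /* -ENOTSUP if the PMD lacks the callback,
                     * -EINVAL on a bad port or queue. */
                    printf("port %u queue %u: rx_queue_count unavailable (%d)\n",
                           port_id, queue_id, used);
                    return;
            }
            printf("port %u queue %u: %d/%u descriptors in use (%.1f%% full)\n",
                   port_id, queue_id, used, nb_rxd, 100.0 * used / nb_rxd);
    }
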
Changes in v2:
  * Fixed styling issues
  * Fix missing return

Changes in v3:
  * Fix styling comments and checks as per Yongseok Koh
    <yskoh@mellanox.com> comments. Thanks !

Changes in v4:
  * Fix compiling issue because of a line that disappeared in v3

Signed-off-by: Tom Barbette <barbette@kth.se>
---
 drivers/net/mlx5/mlx5.c      |  1 +
 drivers/net/mlx5/mlx5_rxtx.c | 78 ++++++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_rxtx.h |  1 +
 3 files changed, 70 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ec63bc6..6fccadd 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -375,6 +375,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
 	.filter_ctrl = mlx5_dev_filter_ctrl,
 	.rx_descriptor_status = mlx5_rx_descriptor_status,
 	.tx_descriptor_status = mlx5_tx_descriptor_status,
+	.rx_queue_count = mlx5_rx_queue_count,
 	.rx_queue_intr_enable = mlx5_rx_intr_enable,
 	.rx_queue_intr_disable = mlx5_rx_intr_disable,
 	.is_removed = mlx5_is_removed,
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2d14f8a..2126205 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -417,20 +417,17 @@ mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
 }
 
 /**
- * DPDK callback to check the status of a rx descriptor.
+ * Internal function to compute the number of used descriptors in an Rx queue.
  *
- * @param rx_queue
- *   The rx queue.
- * @param[in] offset
- *   The index of the descriptor in the ring.
+ * @param rxq
+ *   The Rx queue.
  *
  * @return
- *   The status of the tx descriptor.
+ *   The number of used Rx descriptors.
  */
-int
-mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+static uint32_t
+rx_queue_count(struct mlx5_rxq_data *rxq)
 {
-	struct mlx5_rxq_data *rxq = rx_queue;
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
@@ -461,12 +458,73 @@ mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	}
 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
-	if (offset < used)
+	return used;
+}
+
+/**
+ * DPDK callback to check the status of an Rx descriptor.
+ *
+ * @param rx_queue
+ *   The Rx queue.
+ * @param[in] offset
+ *   The index of the descriptor in the ring.
+ *
+ * @return
+ *   The status of the Rx descriptor.
+ */
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+	struct mlx5_rxq_data *rxq = rx_queue;
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
+
+	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	if (offset >= (1 << rxq->elts_n)) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	if (offset < rx_queue_count(rxq))
 		return RTE_ETH_RX_DESC_DONE;
 	return RTE_ETH_RX_DESC_AVAIL;
 }
 
 /**
+ * DPDK callback to get the number of used descriptors in an Rx queue.
+ *
+ * @param dev
+ *   Pointer to the device structure.
+ *
+ * @param rx_queue_id
+ *   The Rx queue.
+ *
+ * @return
+ *   The number of used Rx descriptors on success,
+ *   -EINVAL if the queue is invalid, -ENOTSUP if the Rx burst function is not the regular one.
+ */
+uint32_t
+mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq;
+
+	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	rxq = (*priv->rxqs)[rx_queue_id];
+	if (!rxq) {
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	return rx_queue_count(rxq);
+}
+
+/**
  * DPDK callback for TX.
  *
  * @param dpdk_txq
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 48ed2b2..c82059b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -345,6 +345,7 @@ uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
 			  uint16_t pkts_n);
 int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint32_t mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
 /* Vectorized version of mlx5_rxtx.c */
 int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
-- 
2.7.4

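A possible application-side use of the new callback (an illustrative sketch,
not part of this patch; the helper name and burst size are made up) is to skip
polling queues that report nothing pending:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST 32

    /* Poll an Rx queue only when the PMD reports used descriptors.
     * pkts must hold at least BURST mbuf pointers. A negative count
     * (e.g. -ENOTSUP on the vectorized mlx5 path) falls back to
     * polling unconditionally. */
    static uint16_t
    poll_if_pending(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **pkts)
    {
            int used = rte_eth_rx_queue_count(port_id, queue_id);

            if (used == 0)
                    return 0;
            return rte_eth_rx_burst(port_id, queue_id, pkts, BURST);
    }
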
Thread overview: 8+ messages
2018-10-27 15:10 Tom Barbette [this message]
2018-10-28  8:58 ` Tom Barbette
2018-10-28  9:37 ` Shahaf Shuler
2018-10-31  9:01   ` Tom Barbette
2018-11-01  7:21     ` Shahaf Shuler
2018-11-05  9:01       ` Tom Barbette
2018-11-05  9:55         ` Olivier Matz
2018-11-05 13:18           ` Shahaf Shuler
