DPDK patches and discussions
From: Michal Krawczyk <mk@semihalf.com>
To: dev@dpdk.org
Cc: mw@semihalf.com, mba@semihalf.com, gtzalik@amazon.com,
	evgenys@amazon.com, igorch@amazon.com, ferruh.yigit@intel.com,
	arybchenko@solarflare.com, Michal Krawczyk <mk@semihalf.com>
Subject: [dpdk-dev] [PATCH v3 18/30] net/ena: refactor getting IO queues capabilities
Date: Wed,  8 Apr 2020 10:29:09 +0200
Message-ID: <20200408082921.31000-19-mk@semihalf.com>
In-Reply-To: <20200408082921.31000-1-mk@semihalf.com>

The values read from the device describe its maximum capabilities. Because
of that, the fields storing those values, as well as the related functions
and temporary variables, should have more descriptive names to make the
code self-documenting.

In connection with this, deriving the maximum queue size can be
simplified: no hardcoded values are needed, as the device reports its
capabilities anyway.

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
---
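For illustration only (not part of the applied patch), below is a minimal
sketch of the capability-driven sizing described above. The names
queue_caps, min_u32, prev_pow2 and calc_max_rx_queue_size are hypothetical
stand-ins; the driver itself uses the ENA admin feature structs together
with RTE_MIN() and rte_align32prevpow2():

#include <stdint.h>

/* Hypothetical stand-in for the device-reported queue capabilities. */
struct queue_caps {
	uint32_t max_rx_sq_depth;
	uint32_t max_rx_cq_depth;
};

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

/* Round down to the nearest power of 2 (what rte_align32prevpow2() does). */
static uint32_t prev_pow2(uint32_t x)
{
	while (x & (x - 1))
		x &= x - 1;
	return x;
}

/*
 * No hardcoded upper bound is needed: the maximum ring size is simply
 * the smaller of the SQ and CQ depths reported by the device, rounded
 * down to a power of 2.
 */
static uint32_t calc_max_rx_queue_size(const struct queue_caps *caps)
{
	return prev_pow2(min_u32(caps->max_rx_sq_depth,
				 caps->max_rx_cq_depth));
}

In the patch itself this corresponds to ena_calc_io_queue_size(), which
applies the same pattern separately for Rx and Tx and additionally clamps
the Tx depth to llq->max_llq_depth when the placement policy is
ENA_ADMIN_PLACEMENT_POLICY_DEV.
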
 drivers/net/ena/ena_ethdev.c | 101 ++++++++++++++++-------------------
 drivers/net/ena/ena_ethdev.h |  11 ++--
 2 files changed, 52 insertions(+), 60 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 62e26a2a16..d0cd0e49c8 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -82,9 +82,6 @@ struct ena_stats {
 #define ENA_STAT_GLOBAL_ENTRY(stat) \
 	ENA_STAT_ENTRY(stat, dev)
 
-#define ENA_MAX_RING_SIZE_RX 8192
-#define ENA_MAX_RING_SIZE_TX 1024
-
 /*
  * Each rte_memzone should have unique name.
  * To satisfy it, count number of allocation and add it to name.
@@ -845,29 +842,26 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 }
 
 static int
-ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
 	struct ena_com_dev *ena_dev = ctx->ena_dev;
-	uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX;
-	uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX;
+	uint32_t max_tx_queue_size;
+	uint32_t max_rx_queue_size;
 
 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
-		rx_queue_size = RTE_MIN(rx_queue_size,
-			max_queue_ext->max_rx_cq_depth);
-		rx_queue_size = RTE_MIN(rx_queue_size,
+		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
 			max_queue_ext->max_rx_sq_depth);
-		tx_queue_size = RTE_MIN(tx_queue_size,
-			max_queue_ext->max_tx_cq_depth);
+		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
 
 		if (ena_dev->tx_mem_queue_type ==
 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-			tx_queue_size = RTE_MIN(tx_queue_size,
+			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
 				llq->max_llq_depth);
 		} else {
-			tx_queue_size = RTE_MIN(tx_queue_size,
+			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
 				max_queue_ext->max_tx_sq_depth);
 		}
 
@@ -878,39 +872,36 @@ ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
 	} else {
 		struct ena_admin_queue_feature_desc *max_queues =
 			&ctx->get_feat_ctx->max_queues;
-		rx_queue_size = RTE_MIN(rx_queue_size,
-			max_queues->max_cq_depth);
-		rx_queue_size = RTE_MIN(rx_queue_size,
+		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
 			max_queues->max_sq_depth);
-		tx_queue_size = RTE_MIN(tx_queue_size,
-			max_queues->max_cq_depth);
+		max_tx_queue_size = max_queues->max_cq_depth;
 
 		if (ena_dev->tx_mem_queue_type ==
 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-			tx_queue_size = RTE_MIN(tx_queue_size,
+			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
 				llq->max_llq_depth);
 		} else {
-			tx_queue_size = RTE_MIN(tx_queue_size,
+			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
 				max_queues->max_sq_depth);
 		}
 
 		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
-			max_queues->max_packet_tx_descs);
-		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
 			max_queues->max_packet_rx_descs);
+		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+			max_queues->max_packet_tx_descs);
 	}
 
 	/* Round down to the nearest power of 2 */
-	rx_queue_size = rte_align32prevpow2(rx_queue_size);
-	tx_queue_size = rte_align32prevpow2(tx_queue_size);
+	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
+	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
 
-	if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
+	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
 		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}
 
-	ctx->rx_queue_size = rx_queue_size;
-	ctx->tx_queue_size = tx_queue_size;
+	ctx->max_tx_queue_size = max_tx_queue_size;
+	ctx->max_rx_queue_size = max_rx_queue_size;
 
 	return 0;
 }
@@ -1230,15 +1221,15 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (nb_desc > adapter->tx_ring_size) {
+	if (nb_desc > adapter->max_tx_ring_size) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported size of TX queue (max size: %d)\n",
-			adapter->tx_ring_size);
+			adapter->max_tx_ring_size);
 		return -EINVAL;
 	}
 
 	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
-		nb_desc = adapter->tx_ring_size;
+		nb_desc = adapter->max_tx_ring_size;
 
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
@@ -1310,7 +1301,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
-		nb_desc = adapter->rx_ring_size;
+		nb_desc = adapter->max_rx_ring_size;
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		PMD_DRV_LOG(ERR,
@@ -1319,10 +1310,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (nb_desc > adapter->rx_ring_size) {
+	if (nb_desc > adapter->max_rx_ring_size) {
 		PMD_DRV_LOG(ERR,
 			"Unsupported size of RX queue (max size: %d)\n",
-			adapter->rx_ring_size);
+			adapter->max_rx_ring_size);
 		return -EINVAL;
 	}
 
@@ -1654,10 +1645,10 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
 	return 0;
 }
 
-static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
-				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
+	struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
-	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
 
 	/* Regular queues capabilities */
 	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -1679,16 +1670,16 @@ static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
 		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
 
-	io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
-	io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num);
-	io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num);
+	max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
+	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
+	max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
 
-	if (unlikely(io_queue_num == 0)) {
+	if (unlikely(max_num_io_queues == 0)) {
 		PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
 		return -EFAULT;
 	}
 
-	return io_queue_num;
+	return max_num_io_queues;
 }
 
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
@@ -1701,6 +1692,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
 	struct ena_llq_configurations llq_config;
 	const char *queue_type_str;
+	uint32_t max_num_io_queues;
 	int rc;
 
 	static int adapters_found;
@@ -1772,20 +1764,19 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 
 	calc_queue_ctx.ena_dev = ena_dev;
 	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
-	adapter->num_queues = ena_calc_io_queue_num(ena_dev,
-						    &get_feat_ctx);
 
-	rc = ena_calc_queue_size(&calc_queue_ctx);
-	if (unlikely((rc != 0) || (adapter->num_queues <= 0))) {
+	max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
+	rc = ena_calc_io_queue_size(&calc_queue_ctx);
+	if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
 		rc = -EFAULT;
 		goto err_device_destroy;
 	}
 
-	adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
-	adapter->rx_ring_size = calc_queue_ctx.rx_queue_size;
-
+	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
+	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
 	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
 	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
+	adapter->max_num_io_queues = max_num_io_queues;
 
 	/* prepare ring structures */
 	ena_init_rings(adapter);
@@ -1904,9 +1895,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 static void ena_init_rings(struct ena_adapter *adapter)
 {
-	int i;
+	size_t i;
 
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->max_num_io_queues; i++) {
 		struct ena_ring *ring = &adapter->tx_ring[i];
 
 		ring->configured = 0;
@@ -1918,7 +1909,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
 		ring->sgl_size = adapter->max_tx_sgl_size;
 	}
 
-	for (i = 0; i < adapter->num_queues; i++) {
+	for (i = 0; i < adapter->max_num_io_queues; i++) {
 		struct ena_ring *ring = &adapter->rx_ring[i];
 
 		ring->configured = 0;
@@ -1982,21 +1973,21 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_rx_pktlen  = adapter->max_mtu;
 	dev_info->max_mac_addrs = 1;
 
-	dev_info->max_rx_queues = adapter->num_queues;
-	dev_info->max_tx_queues = adapter->num_queues;
+	dev_info->max_rx_queues = adapter->max_num_io_queues;
+	dev_info->max_tx_queues = adapter->max_num_io_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
 	adapter->tx_supported_offloads = tx_feat;
 	adapter->rx_supported_offloads = rx_feat;
 
-	dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size;
+	dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
 	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
 					adapter->max_rx_sgl_size);
 	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
 					adapter->max_rx_sgl_size);
 
-	dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size;
+	dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
 					adapter->max_tx_sgl_size);
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index e9b55dc029..1f320088ac 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -21,6 +21,7 @@
 #define ENA_NAME_MAX_LEN	20
 #define ENA_PKT_MAX_BUFS	17
 #define ENA_RX_BUF_MIN_SIZE	1400
+#define ENA_DEFAULT_RING_SIZE	1024
 
 #define ENA_MIN_MTU		128
 
@@ -46,8 +47,8 @@ struct ena_tx_buffer {
 struct ena_calc_queue_size_ctx {
 	struct ena_com_dev_get_features_ctx *get_feat_ctx;
 	struct ena_com_dev *ena_dev;
-	u16 rx_queue_size;
-	u16 tx_queue_size;
+	u32 max_rx_queue_size;
+	u32 max_tx_queue_size;
 	u16 max_tx_sgl_size;
 	u16 max_rx_sgl_size;
 };
@@ -159,15 +160,15 @@ struct ena_adapter {
 
 	/* TX */
 	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
-	int tx_ring_size;
+	u32 max_tx_ring_size;
 	u16 max_tx_sgl_size;
 
 	/* RX */
 	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
-	int rx_ring_size;
+	u32 max_rx_ring_size;
 	u16 max_rx_sgl_size;
 
-	u16 num_queues;
+	u32 max_num_io_queues;
 	u16 max_mtu;
 	struct ena_offloads offloads;
 
-- 
2.20.1

