From: Michal Krawczyk <mk@semihalf.com>
To: dev@dpdk.org
Cc: gtzalik@dpdk.org, mw@dpdk.org, matua@amazon.com, rk@semihalf.com
Subject: [dpdk-dev] [PATCH 04/20] net/ena: add hw queues depth setup
Date: Fri, 14 Dec 2018 14:18:30 +0100
Message-ID: <20181214131846.22439-5-mk@semihalf.com>
In-Reply-To: <20181214131846.22439-1-mk@semihalf.com>

From: Rafal Kozik <rk@semihalf.com>

The device now allows the driver to configure the Tx and Rx queue
depths independently. Moreover, the maximum sizes of the Tx and Rx
rings can differ. Both maximum values are received from the device.
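
As an aside, the depths reported by the device are not necessarily
powers of two, so ena_calc_queue_size() below rounds them down. A
minimal sketch of that rounding step, assuming a DPDK build that
provides rte_align32prevpow2() (rte_common.h):

    #include <rte_common.h>

    /* Round a device-reported depth down to the nearest power of 2;
     * e.g. 1000 -> 512, while 1024 is already aligned and is kept. */
    static uint32_t clamp_depth_pow2(uint32_t dev_reported_depth)
    {
            return rte_align32prevpow2(dev_reported_depth);
    }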

After a reset, the previous ring configuration is restored.

If the number of descriptors is set to RTE_ETH_DEV_FALLBACK_RX_RINGSIZE
or RTE_ETH_DEV_FALLBACK_TX_RINGSIZE, the maximum supported value is
used instead.
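
For context, an application typically reaches this path by passing
nb_desc == 0 to the setup call; the ethdev layer then substitutes the
fallback constant (when the PMD reports no default ring size), which
this patch expands to the device maximum. A hedged sketch, assuming
port_id and mb_pool are initialized elsewhere:

    #include <rte_ethdev.h>

    static int setup_rx_with_max_depth(uint16_t port_id,
                                       struct rte_mempool *mb_pool)
    {
            /* nb_rx_desc == 0 lets the ethdev layer pick the fallback
             * ring size; the ENA PMD now expands that to the device
             * maximum computed at init time. */
            return rte_eth_rx_queue_setup(port_id, 0 /* queue id */,
                                          0 /* nb_rx_desc */,
                                          rte_eth_dev_socket_id(port_id),
                                          NULL, mb_pool);
    }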

Remove the checks verifying that the provided number of descriptors is
not too big, as this is already done in the generic functions
(rte_eth_rx_queue_setup() and rte_eth_tx_queue_setup()).
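
For reference, the generic layer enforces the limits the PMD reports
through rte_eth_dev_info_get(); a paraphrased sketch of that check
(not the verbatim ethdev source):

    if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0)
            return -EINVAL;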

Set the maximum number of segments for Rx packets and provide it to
ena_com_rx_pkt() for validation.
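
The per-ring limit is carried in the new sgl_size field and handed to
the Rx context, mirroring the eth_ena_recv_pkts() hunk below (a
sketch; field and function names as used in this patch):

    struct ena_com_rx_ctx ena_rx_ctx;
    int rc;

    ena_rx_ctx.max_bufs = rx_ring->sgl_size; /* was ring_size before */
    ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
    ena_rx_ctx.descs = 0;
    /* ena_com_rx_pkt() validates the descriptor count against
     * max_bufs before filling ena_bufs. */
    rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq,
                        &ena_rx_ctx);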

Also remove the now-unused definitions.

Signed-off-by: Rafal Kozik <rk@semihalf.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>
---
 drivers/net/ena/ena_ethdev.c | 125 ++++++++++++++++++++++++++++---------------
 drivers/net/ena/ena_ethdev.h |  11 +++-
 2 files changed, 93 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a47f5f7fa..617f85f02 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -85,7 +85,6 @@

 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

-#define ENA_MAX_RING_DESC	ENA_DEFAULT_RING_SIZE
 #define ENA_MIN_RING_DESC	128

 enum ethtool_stringset {
@@ -591,11 +590,12 @@ ena_dev_reset(struct rte_eth_dev *dev)
 	ena_com_admin_aenq_enable(ena_dev);

 	for (i = 0; i < nb_queues; ++i)
-		ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
-			mb_pool_rx[i]);
+		ena_rx_queue_setup(eth_dev, i, adapter->rx_ring[i].ring_size, 0,
+			NULL, mb_pool_rx[i]);

 	for (i = 0; i < nb_queues; ++i)
-		ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+		ena_tx_queue_setup(eth_dev, i, adapter->tx_ring[i].ring_size, 0,
+			NULL);

 	adapter->trigger_reset = false;

@@ -935,34 +935,46 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 }

 static int
-ena_calc_queue_size(struct ena_com_dev *ena_dev,
-		    u16 *max_tx_sgl_size,
-		    struct ena_com_dev_get_features_ctx *get_feat_ctx)
+ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
-	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
-
-	queue_size = RTE_MIN(queue_size,
-			     get_feat_ctx->max_queues.max_cq_depth);
-	queue_size = RTE_MIN(queue_size,
-			     get_feat_ctx->max_queues.max_sq_depth);
-
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-		queue_size = RTE_MIN(queue_size,
-				     get_feat_ctx->max_queues.max_legacy_llq_depth);
-
-	/* Round down to power of 2 */
-	if (!rte_is_power_of_2(queue_size))
-		queue_size = rte_align32pow2(queue_size >> 1);
-
-	if (unlikely(queue_size == 0)) {
+	uint32_t tx_queue_size, rx_queue_size;
+
+	if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+		rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
+			max_queue_ext->max_rx_sq_depth);
+		tx_queue_size = RTE_MIN(max_queue_ext->max_tx_cq_depth,
+			max_queue_ext->max_tx_sq_depth);
+		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+			max_queue_ext->max_per_packet_rx_descs);
+		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+			max_queue_ext->max_per_packet_tx_descs);
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&ctx->get_feat_ctx->max_queues;
+		rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
+			max_queues->max_sq_depth);
+		tx_queue_size = rx_queue_size;
+		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+			max_queues->max_packet_rx_descs);
+		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+			max_queues->max_packet_tx_descs);
+	}
+
+	/* Round down to the nearest power of 2 */
+	rx_queue_size = rte_align32prevpow2(rx_queue_size);
+	tx_queue_size = rte_align32prevpow2(tx_queue_size);
+
+	if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
 		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}

-	*max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
-		get_feat_ctx->max_queues.max_packet_tx_descs);
+	ctx->rx_queue_size = rx_queue_size;
+	ctx->tx_queue_size = tx_queue_size;

-	return queue_size;
+	return 0;
 }

 static void ena_stats_restart(struct rte_eth_dev *dev)
@@ -1239,6 +1251,9 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}

+	if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
+		nb_desc = adapter->tx_ring_size;
+
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
 	txq->next_to_use = 0;
@@ -1297,6 +1312,9 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return ENA_COM_FAULT;
 	}

+	if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
+		nb_desc = adapter->rx_ring_size;
+
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
 			"Unsupported size of RX queue: %d is not a power of 2.",
@@ -1574,13 +1592,26 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 	}
 }

-static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
 				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
-	int io_sq_num, io_cq_num, io_queue_num;
+	uint32_t io_sq_num, io_cq_num, io_queue_num;
+
+	/* Regular queues capabilities */
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&get_feat_ctx->max_queue_ext.max_queue_ext;
+		io_sq_num = max_queue_ext->max_rx_sq_num;
+		io_sq_num = RTE_MIN(io_sq_num, max_queue_ext->max_tx_sq_num);

-	io_sq_num = get_feat_ctx->max_queues.max_sq_num;
-	io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+		io_cq_num = max_queue_ext->max_rx_cq_num;
+		io_cq_num = RTE_MIN(io_cq_num, max_queue_ext->max_tx_cq_num);
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&get_feat_ctx->max_queues;
+		io_sq_num = max_queues->max_sq_num;
+		io_cq_num = max_queues->max_cq_num;
+	}

 	io_queue_num = RTE_MIN(io_sq_num, io_cq_num);

@@ -1594,14 +1625,14 @@ static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,

 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
+	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
 	struct rte_pci_device *pci_dev;
 	struct rte_intr_handle *intr_handle;
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(eth_dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
-	int queue_size, rc;
-	u16 tx_sgl_size = 0;
+	int rc;

 	static int adapters_found;
 	bool wd_state;
@@ -1656,19 +1687,24 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->wd_state = wd_state;

 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+
+	calc_queue_ctx.ena_dev = ena_dev;
+	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
+
 	adapter->num_queues = ena_calc_io_queue_num(ena_dev,
 						    &get_feat_ctx);

-	queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
-	if (queue_size <= 0 || adapter->num_queues <= 0) {
+	rc = ena_calc_queue_size(&calc_queue_ctx);
+	if (unlikely((rc != 0) || (adapter->num_queues <= 0))) {
 		rc = -EFAULT;
 		goto err_device_destroy;
 	}

-	adapter->tx_ring_size = queue_size;
-	adapter->rx_ring_size = queue_size;
+	adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
+	adapter->rx_ring_size = calc_queue_ctx.rx_queue_size;

-	adapter->max_tx_sgl_size = tx_sgl_size;
+	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
+	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

 	/* prepare ring structures */
 	ena_init_rings(adapter);
@@ -1785,6 +1821,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
 		ring->type = ENA_RING_TYPE_RX;
 		ring->adapter = adapter;
 		ring->id = i;
+		ring->sgl_size = adapter->max_rx_sgl_size;
 	}
 }

@@ -1857,15 +1894,19 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	adapter->tx_supported_offloads = tx_feat;
 	adapter->rx_supported_offloads = rx_feat;

-	dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size;
 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+	dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					adapter->max_rx_sgl_size);
+	dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					adapter->max_rx_sgl_size);

-	dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size;
 	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
 	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
-					feat.max_queues.max_packet_tx_descs);
+					adapter->max_tx_sgl_size);
 	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
-					feat.max_queues.max_packet_tx_descs);
+					adapter->max_tx_sgl_size);
 }

 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1901,7 +1942,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	for (completed = 0; completed < nb_pkts; completed++) {
 		int segments = 0;

-		ena_rx_ctx.max_bufs = rx_ring->ring_size;
+		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
 		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
 		ena_rx_ctx.descs = 0;
 		/* receive packet context */
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 322e90ace..e6f7bd012 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -45,7 +45,6 @@
 #define ENA_MEM_BAR	2

 #define ENA_MAX_NUM_QUEUES	128
-#define ENA_DEFAULT_RING_SIZE	(1024)
 #define ENA_MIN_FRAME_LEN	64
 #define ENA_NAME_MAX_LEN	20
 #define ENA_PKT_MAX_BUFS	17
@@ -71,6 +70,15 @@ struct ena_tx_buffer {
 	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
 };

+struct ena_calc_queue_size_ctx {
+	struct ena_com_dev_get_features_ctx *get_feat_ctx;
+	struct ena_com_dev *ena_dev;
+	u16 rx_queue_size;
+	u16 tx_queue_size;
+	u16 max_tx_sgl_size;
+	u16 max_rx_sgl_size;
+};
+
 struct ena_ring {
 	u16 next_to_use;
 	u16 next_to_clean;
@@ -176,6 +184,7 @@ struct ena_adapter {
 	/* RX */
 	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
 	int rx_ring_size;
+	u16 max_rx_sgl_size;

 	u16 num_queues;
 	u16 max_mtu;
--
2.14.1
