From: Michal Krawczyk <mk@semihalf.com>
To: dev@dpdk.org
Cc: mw@semihalf.com, mba@semihalf.com, gtzalik@amazon.com,
evgenys@amazon.com, igorch@amazon.com,
Michal Krawczyk <mk@semihalf.com>
Subject: [dpdk-dev] [PATCH 16/29] net/ena: refactor getting IO queues capabilities
Date: Fri, 27 Mar 2020 11:18:10 +0100 [thread overview]
Message-ID: <20200327101823.12646-17-mk@semihalf.com> (raw)
In-Reply-To: <20200327101823.12646-1-mk@semihalf.com>
The values read from the device describe its maximum capabilities.
Because of that, the names of the fields storing those values,
functions and temporary variables, should be more descriptive in order
to improve self-documentation of the code.
In connection with this, the way of getting maximum queue size could be
simplified - no hardcoded values are needed, as the device is going to
send its capabilities anyway.
Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Guy Tzalik <gtzalik@amazon.com>
---
drivers/net/ena/ena_ethdev.c | 101 ++++++++++++++++-------------------
drivers/net/ena/ena_ethdev.h | 11 ++--
2 files changed, 52 insertions(+), 60 deletions(-)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 5f9a44ff71..e1d1a9a7b6 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -82,9 +82,6 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
-#define ENA_MAX_RING_SIZE_RX 8192
-#define ENA_MAX_RING_SIZE_TX 1024
-
/*
* Each rte_memzone should have unique name.
* To satisfy it, count number of allocation and add it to name.
@@ -844,29 +841,26 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
}
static int
-ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev;
- uint32_t tx_queue_size = ENA_MAX_RING_SIZE_TX;
- uint32_t rx_queue_size = ENA_MAX_RING_SIZE_RX;
+ uint32_t max_tx_queue_size;
+ uint32_t max_rx_queue_size;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
- rx_queue_size = RTE_MIN(rx_queue_size,
- max_queue_ext->max_rx_cq_depth);
- rx_queue_size = RTE_MIN(rx_queue_size,
+ max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth);
- tx_queue_size = RTE_MIN(tx_queue_size,
- max_queue_ext->max_tx_cq_depth);
+ max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
if (ena_dev->tx_mem_queue_type ==
ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- tx_queue_size = RTE_MIN(tx_queue_size,
+ max_tx_queue_size = RTE_MIN(max_tx_queue_size,
llq->max_llq_depth);
} else {
- tx_queue_size = RTE_MIN(tx_queue_size,
+ max_tx_queue_size = RTE_MIN(max_tx_queue_size,
max_queue_ext->max_tx_sq_depth);
}
@@ -877,39 +871,36 @@ ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
} else {
struct ena_admin_queue_feature_desc *max_queues =
&ctx->get_feat_ctx->max_queues;
- rx_queue_size = RTE_MIN(rx_queue_size,
- max_queues->max_cq_depth);
- rx_queue_size = RTE_MIN(rx_queue_size,
+ max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
max_queues->max_sq_depth);
- tx_queue_size = RTE_MIN(tx_queue_size,
- max_queues->max_cq_depth);
+ max_tx_queue_size = max_queues->max_cq_depth;
if (ena_dev->tx_mem_queue_type ==
ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- tx_queue_size = RTE_MIN(tx_queue_size,
+ max_tx_queue_size = RTE_MIN(max_tx_queue_size,
llq->max_llq_depth);
} else {
- tx_queue_size = RTE_MIN(tx_queue_size,
+ max_tx_queue_size = RTE_MIN(max_tx_queue_size,
max_queues->max_sq_depth);
}
ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
max_queues->max_packet_rx_descs);
+ ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
}
/* Round down to the nearest power of 2 */
- rx_queue_size = rte_align32prevpow2(rx_queue_size);
- tx_queue_size = rte_align32prevpow2(tx_queue_size);
+ max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
+ max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);
- if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) {
+ if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
- ctx->rx_queue_size = rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
+ ctx->max_tx_queue_size = max_tx_queue_size;
+ ctx->max_rx_queue_size = max_rx_queue_size;
return 0;
}
@@ -1229,15 +1220,15 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (nb_desc > adapter->tx_ring_size) {
+ if (nb_desc > adapter->max_tx_ring_size) {
PMD_DRV_LOG(ERR,
"Unsupported size of TX queue (max size: %d)\n",
- adapter->tx_ring_size);
+ adapter->max_tx_ring_size);
return -EINVAL;
}
if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
- nb_desc = adapter->tx_ring_size;
+ nb_desc = adapter->max_tx_ring_size;
txq->port_id = dev->data->port_id;
txq->next_to_clean = 0;
@@ -1309,7 +1300,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
}
if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
- nb_desc = adapter->rx_ring_size;
+ nb_desc = adapter->max_rx_ring_size;
if (!rte_is_power_of_2(nb_desc)) {
PMD_DRV_LOG(ERR,
@@ -1318,10 +1309,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (nb_desc > adapter->rx_ring_size) {
+ if (nb_desc > adapter->max_rx_ring_size) {
PMD_DRV_LOG(ERR,
"Unsupported size of RX queue (max size: %d)\n",
- adapter->rx_ring_size);
+ adapter->max_rx_ring_size);
return -EINVAL;
}
@@ -1655,10 +1646,10 @@ ena_set_queues_placement_policy(struct ena_adapter *adapter,
return 0;
}
-static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
+ uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
/* Regular queues capabilities */
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -1680,16 +1671,16 @@ static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
- io_queue_num = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
- io_queue_num = RTE_MIN(io_queue_num, io_tx_sq_num);
- io_queue_num = RTE_MIN(io_queue_num, io_tx_cq_num);
+ max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
+ max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
+ max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
- if (unlikely(io_queue_num == 0)) {
+ if (unlikely(max_num_io_queues == 0)) {
PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
return -EFAULT;
}
- return io_queue_num;
+ return max_num_io_queues;
}
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
@@ -1702,6 +1693,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_llq_configurations llq_config;
const char *queue_type_str;
+ uint32_t max_num_io_queues;
int rc;
static int adapters_found;
@@ -1773,20 +1765,19 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
calc_queue_ctx.ena_dev = ena_dev;
calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- adapter->num_queues = ena_calc_io_queue_num(ena_dev,
- &get_feat_ctx);
- rc = ena_calc_queue_size(&calc_queue_ctx);
- if (unlikely((rc != 0) || (adapter->num_queues <= 0))) {
+ max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx);
+ if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
rc = -EFAULT;
goto err_device_destroy;
}
- adapter->tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->rx_ring_size = calc_queue_ctx.rx_queue_size;
-
+ adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
+ adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
+ adapter->max_num_io_queues = max_num_io_queues;
/* prepare ring structures */
ena_init_rings(adapter);
@@ -1905,9 +1896,9 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
static void ena_init_rings(struct ena_adapter *adapter)
{
- int i;
+ size_t i;
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->max_num_io_queues; i++) {
struct ena_ring *ring = &adapter->tx_ring[i];
ring->configured = 0;
@@ -1919,7 +1910,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
ring->sgl_size = adapter->max_tx_sgl_size;
}
- for (i = 0; i < adapter->num_queues; i++) {
+ for (i = 0; i < adapter->max_num_io_queues; i++) {
struct ena_ring *ring = &adapter->rx_ring[i];
ring->configured = 0;
@@ -1983,21 +1974,21 @@ static int ena_infos_get(struct rte_eth_dev *dev,
dev_info->max_rx_pktlen = adapter->max_mtu;
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_queues = adapter->num_queues;
- dev_info->max_tx_queues = adapter->num_queues;
+ dev_info->max_rx_queues = adapter->max_num_io_queues;
+ dev_info->max_tx_queues = adapter->max_num_io_queues;
dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
adapter->tx_supported_offloads = tx_feat;
adapter->rx_supported_offloads = rx_feat;
- dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size;
+ dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
adapter->max_rx_sgl_size);
dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
adapter->max_rx_sgl_size);
- dev_info->tx_desc_lim.nb_max = adapter->tx_ring_size;
+ dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
adapter->max_tx_sgl_size);
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index c1457defeb..99d1fba64d 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -21,6 +21,7 @@
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
#define ENA_RX_BUF_MIN_SIZE 1400
+#define ENA_DEFAULT_RING_SIZE 1024
#define ENA_MIN_MTU 128
@@ -46,8 +47,8 @@ struct ena_tx_buffer {
struct ena_calc_queue_size_ctx {
struct ena_com_dev_get_features_ctx *get_feat_ctx;
struct ena_com_dev *ena_dev;
- u16 rx_queue_size;
- u16 tx_queue_size;
+ u32 max_rx_queue_size;
+ u32 max_tx_queue_size;
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
};
@@ -159,15 +160,15 @@ struct ena_adapter {
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
- int tx_ring_size;
+ u32 max_tx_ring_size;
u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
- int rx_ring_size;
+ u32 max_rx_ring_size;
u16 max_rx_sgl_size;
- u16 num_queues;
+ u32 max_num_io_queues;
u16 max_mtu;
struct ena_offloads offloads;
--
2.20.1
next prev parent reply other threads:[~2020-03-27 10:31 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-27 10:17 [dpdk-dev] [PATCH 00/29] Update ENA driver to v2.1.0 Michal Krawczyk
2020-03-27 10:17 ` [dpdk-dev] [PATCH 01/29] net/ena: check if size of buffer is at least 1400B Michal Krawczyk
2020-03-27 10:55 ` Andrew Rybchenko
2020-03-31 9:47 ` Michał Krawczyk
2020-03-27 14:51 ` Stephen Hemminger
2020-03-31 9:48 ` Michał Krawczyk
2020-03-27 10:17 ` [dpdk-dev] [PATCH 02/29] net/ena/base: make allocation macros thread-safe Michal Krawczyk
2020-03-27 14:54 ` Stephen Hemminger
2020-03-31 9:47 ` Michał Krawczyk
2020-03-27 10:17 ` [dpdk-dev] [PATCH 03/29] net/ena/base: prevent allocation of 0-sized memory Michal Krawczyk
2020-03-27 10:17 ` [dpdk-dev] [PATCH 04/29] net/ena/base: set default hash key Michal Krawczyk
2020-03-27 11:12 ` Andrew Rybchenko
2020-03-31 9:40 ` Michał Krawczyk
2020-03-31 9:51 ` Michał Krawczyk
2020-03-27 10:17 ` [dpdk-dev] [PATCH 05/29] net/ena/base: rework interrupt moderation Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 06/29] net/ena/base: remove extra properties strings Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 07/29] net/ena/base: add accelerated LLQ mode Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 08/29] net/ena/base: fix documentation of the functions Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 09/29] net/ena/base: fix indentation in cq polling Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 10/29] net/ena/base: add error logs when preparing Tx Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 11/29] net/ena/base: use 48-bit memory addresses in ena_com Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 12/29] net/ena/base: fix types for printing timestamps Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 13/29] net/ena/base: fix indentation of multiple defines Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 14/29] net/ena/base: update gen date and commit Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 15/29] net/ena: set IO ring size to the valid value Michal Krawczyk
2020-03-27 10:18 ` Michal Krawczyk [this message]
2020-03-27 10:18 ` [dpdk-dev] [PATCH 17/29] net/ena: add support for large LLQ headers Michal Krawczyk
2020-03-27 11:20 ` Andrew Rybchenko
2020-03-31 9:42 ` Michał Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 18/29] net/ena: remove memory barriers before doorbells Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 19/29] net/ena: add Tx drops statistic Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 20/29] net/ena: disable meta caching Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 21/29] net/ena: refactor Rx path Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 22/29] net/ena: rework getting number of available descs Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 23/29] net/ena: limit refill threshold by fixed value Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 24/29] net/ena: use macros for ring idx operations Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 25/29] net/ena: refactor Tx path Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 26/29] net/ena: reuse 0 length Rx descriptor Michal Krawczyk
2020-03-27 11:29 ` Andrew Rybchenko
2020-03-31 9:45 ` Michał Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 27/29] doc: add notes on ENA usage on metal instances Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 28/29] net/ena: update copyright date Michal Krawczyk
2020-03-27 10:18 ` [dpdk-dev] [PATCH 29/29] net/ena: update version of the driver to v2.1.0 Michal Krawczyk
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200327101823.12646-17-mk@semihalf.com \
--to=mk@semihalf.com \
--cc=dev@dpdk.org \
--cc=evgenys@amazon.com \
--cc=gtzalik@amazon.com \
--cc=igorch@amazon.com \
--cc=mba@semihalf.com \
--cc=mw@semihalf.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).