patches for DPDK stable branches
From: Michal Krawczyk <mk@semihalf.com>
To: stable@dpdk.org
Cc: christian.ehrhardt@canonical.com,
	Michal Krawczyk <mk@semihalf.com>,
	Igor Chauskin <igorch@amazon.com>,
	Shai Brandes <shaibran@amazon.com>
Subject: [PATCH 19.11 2/4] net/ena: fix per-queue offload capabilities
Date: Fri,  3 Dec 2021 16:08:57 +0100
Message-ID: <20211203150859.3933-2-mk@semihalf.com>
In-Reply-To: <20211203150859.3933-1-mk@semihalf.com>

[ upstream commit 3a822d79c5da8ed65fd08a8188b9b7d4c35fe199 ]

As ENA currently doesn't support any offloads which could be configured
per-queue, only the per-port flags should be set, as described in the
DPDK poll mode driver documentation [1].

In addition, to make the code cleaner, parsing of the appropriate
offload flags is encapsulated in helper functions, in a similar manner
to how it's done by the other PMDs.

[1] https://doc.dpdk.org/guides/prog_guide/
    poll_mode_drv.html?highlight=offloads#hardware-offload
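
For reference, below is a minimal sketch (not part of this patch; the
function name, port_id handling and printf reporting are illustrative
assumptions) showing how an application observes the per-port vs.
per-queue split that ena_infos_get() now reports through
rte_eth_dev_info_get():

  #include <inttypes.h>
  #include <stdio.h>
  #include <rte_ethdev.h>

  /* Print the offload capabilities reported for an already probed port. */
  static int show_offload_capa(uint16_t port_id)
  {
          struct rte_eth_dev_info dev_info;

          if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
                  return -1;

          /* Per-port capabilities: requested via rte_eth_conf.rxmode.offloads
           * and txmode.offloads when calling rte_eth_dev_configure(). */
          if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
                  printf("Rx IPv4 checksum supported (per port)\n");
          if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
                  printf("Tx TCP TSO supported (per port)\n");

          /* Per-queue capabilities: with this fix ENA reports none, so no
           * extra offloads can be enabled via rte_eth_rxconf/txconf.offloads
           * on individual queues. */
          printf("Rx queue-only offloads: 0x%" PRIx64 "\n",
                 dev_info.rx_queue_offload_capa);
          printf("Tx queue-only offloads: 0x%" PRIx64 "\n",
                 dev_info.tx_queue_offload_capa);

          return 0;
  }

Previously, ENA copied the port capabilities into the per-queue fields,
which advertised every offload as configurable on individual queues even
though the device cannot do that; with this change the per-queue fields
are reported as 0.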

Fixes: 7369f88f88c0 ("net/ena: convert to new Rx offloads API")
Fixes: 56b8b9b7e5d2 ("net/ena: convert to new Tx offloads API")
Cc: stable@dpdk.org

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shai Brandes <shaibran@amazon.com>
---
 drivers/net/ena/ena_ethdev.c | 91 ++++++++++++++++++++++++------------
 1 file changed, 62 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a00d29aa1..5f15d55d4 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -229,6 +229,10 @@ static int ena_queue_start(struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
 			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
 static int ena_infos_get(struct rte_eth_dev *dev,
 			 struct rte_eth_dev_info *dev_info);
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
@@ -1965,12 +1969,65 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+	if (adapter->offloads.rx_offloads &
+	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+		port_offloads |=
+			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	port_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
+		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (adapter->offloads.tx_offloads &
+	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+		port_offloads |=
+			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
 static int ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
-	uint64_t rx_feat = 0, tx_feat = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
@@ -1989,33 +2046,11 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_50G  |
 			ETH_LINK_SPEED_100G;
 
-	/* Set Tx & Rx features available for device */
-	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.tx_offloads &
-	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
-	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
-		tx_feat |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
-
-	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.rx_offloads &
-	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
-		rx_feat |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
-
-	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
 	/* Inform framework about available features */
-	dev_info->rx_offload_capa = rx_feat;
-	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev_info->rx_queue_offload_capa = rx_feat;
-	dev_info->tx_offload_capa = tx_feat;
-	dev_info->tx_queue_offload_capa = tx_feat;
+	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
+	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
+	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
+	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
 
 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
 					   ETH_RSS_UDP;
@@ -2028,8 +2063,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = adapter->num_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
-	adapter->tx_supported_offloads = tx_feat;
-	adapter->rx_supported_offloads = rx_feat;
 
 	dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size;
 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
-- 
2.25.1


Thread overview: 5+ messages
2021-12-03 15:08 [PATCH 19.11 1/4] net/ena: fix offload capabilities verification Michal Krawczyk
2021-12-03 15:08 ` Michal Krawczyk [this message]
2021-12-03 15:08 ` [PATCH 19.11 3/4] net/ena: advertise scattered Rx capability Michal Krawczyk
2021-12-03 15:08 ` [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure" Michal Krawczyk
2021-12-06  8:31   ` Christian Ehrhardt
