patches for DPDK stable branches
* [PATCH 19.11 1/4] net/ena: fix offload capabilities verification
@ 2021-12-03 15:08 Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 2/4] net/ena: fix per-queue offload capabilities Michal Krawczyk
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Michal Krawczyk @ 2021-12-03 15:08 UTC (permalink / raw)
  To: stable; +Cc: christian.ehrhardt, Michal Krawczyk, Igor Chauskin, Shai Brandes

[ upstream commit e8c838fde93f48c2a7504570aae38c06e3189fa1 ]

The ENA PMD has multiple checksum offload flags, which are more
fine-grained than the DPDK offload capability flags.
As the driver wasn't storing its internal checksum offload capabilities
and was relying only on the DPDK capabilities, not all scenarios could
be covered properly (like when to prepare the pseudo header checksum
and when not).

Moreover, the user could request an offload capability which isn't
supported by the HW, and the PMD would quietly ignore the issue.

This commit reworks the eth_ena_prep_pkts() function to perform
additional checks and to properly reflect the HW requirements. With
RTE_LIBRTE_ETHDEV_DEBUG enabled, the function performs even more
verifications to help the user find any issues with the mbuf
configuration.
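
For reference, below is a minimal application-side sketch (not part of
this patch; the function name, flag choice and header sizes are
illustrative assumptions) showing how Tx checksum offloads and the
l2_len/l3_len fields are expected to be set before calling
rte_eth_tx_prepare(), which is where eth_ena_prep_pkts() runs these
checks:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

static uint16_t
send_tcp_with_csum_offload(uint16_t port_id, uint16_t queue_id,
                           struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t i, nb_prep;

        for (i = 0; i < nb_pkts; i++) {
                /* The PMD requires l2_len/l3_len when a csum offload is set. */
                pkts[i]->l2_len = sizeof(struct rte_ether_hdr);
                pkts[i]->l3_len = sizeof(struct rte_ipv4_hdr);
                pkts[i]->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                                     PKT_TX_TCP_CKSUM;
        }

        /* tx_prepare invokes eth_ena_prep_pkts(); with this fix it rejects
         * flag combinations the HW can't handle and fills in the pseudo
         * header checksum only when the device actually needs it.
         */
        nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
        if (nb_prep != nb_pkts)
                printf("mbuf %u rejected: %s\n", nb_prep,
                       rte_strerror(rte_errno));

        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}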

Fixes: b3fc5a1ae10d ("net/ena: add Tx preparation")
Cc: stable@dpdk.org

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shai Brandes <shaibran@amazon.com>
---
 drivers/net/ena/ena_ethdev.c | 232 +++++++++++++++++++++++++++--------
 drivers/net/ena/ena_ethdev.h |   5 +-
 2 files changed, 185 insertions(+), 52 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 408e12751..a00d29aa1 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -147,6 +147,23 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define	ENA_TX_OFFLOAD_NOTSUP_MASK	\
 	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
+/** HW specific offloads capabilities. */
+/* IPv4 checksum offload. */
+#define ENA_L3_IPV4_CSUM		0x0001
+/* TCP/UDP checksum offload for IPv4 packets. */
+#define ENA_L4_IPV4_CSUM		0x0002
+/* TCP/UDP checksum offload for IPv4 packets with pseudo header checksum. */
+#define ENA_L4_IPV4_CSUM_PARTIAL	0x0004
+/* TCP/UDP checksum offload for IPv6 packets. */
+#define ENA_L4_IPV6_CSUM		0x0008
+/* TCP/UDP checksum offload for IPv6 packets with pseudo header checksum. */
+#define ENA_L4_IPV6_CSUM_PARTIAL	0x0010
+/* TSO support for IPv4 packets. */
+#define ENA_IPV4_TSO			0x0020
+
+/* Device supports setting RSS hash. */
+#define ENA_RX_RSS_HASH			0x0040
+
 int ena_logtype_init;
 int ena_logtype_driver;
 
@@ -1671,6 +1688,50 @@ static int ena_calc_io_queue_num(struct ena_com_dev *ena_dev,
 	return io_queue_num;
 }
 
+static void
+ena_set_offloads(struct ena_offloads *offloads,
+		 struct ena_admin_feature_offload_desc *offload_desc)
+{
+	if (offload_desc->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
+		offloads->tx_offloads |= ENA_IPV4_TSO;
+
+	/* Tx IPv4 checksum offloads */
+	if (offload_desc->tx &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)
+		offloads->tx_offloads |= ENA_L3_IPV4_CSUM;
+	if (offload_desc->tx &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
+		offloads->tx_offloads |= ENA_L4_IPV4_CSUM;
+	if (offload_desc->tx &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
+		offloads->tx_offloads |= ENA_L4_IPV4_CSUM_PARTIAL;
+
+	/* Tx IPv6 checksum offloads */
+	if (offload_desc->tx &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
+		offloads->tx_offloads |= ENA_L4_IPV6_CSUM;
+	if (offload_desc->tx &
+	     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
+		offloads->tx_offloads |= ENA_L4_IPV6_CSUM_PARTIAL;
+
+	/* Rx IPv4 checksum offloads */
+	if (offload_desc->rx_supported &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)
+		offloads->rx_offloads |= ENA_L3_IPV4_CSUM;
+	if (offload_desc->rx_supported &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
+		offloads->rx_offloads |= ENA_L4_IPV4_CSUM;
+
+	/* Rx IPv6 checksum offloads */
+	if (offload_desc->rx_supported &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
+		offloads->rx_offloads |= ENA_L4_IPV6_CSUM;
+
+	if (offload_desc->rx_supported &
+	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
+		offloads->rx_offloads |= ENA_RX_RSS_HASH;
+}
+
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
@@ -1775,14 +1836,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	/* Set max MTU for this device */
 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
 
-	/* set device support for offloads */
-	adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
-		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
-	adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
-		ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
-	adapter->offloads.rx_csum_supported =
-		(get_feat_ctx.offload.rx_supported &
-		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
+	ena_set_offloads(&adapter->offloads, &get_feat_ctx.offload);
 
 	/* Copy MAC address and point DPDK to it */
 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
@@ -1936,24 +1990,29 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_100G;
 
 	/* Set Tx & Rx features available for device */
-	if (adapter->offloads.tso4_supported)
+	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
 		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
 
-	if (adapter->offloads.tx_csum_supported)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-			DEV_TX_OFFLOAD_UDP_CKSUM |
-			DEV_TX_OFFLOAD_TCP_CKSUM;
+	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (adapter->offloads.tx_offloads &
+	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+		tx_feat |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
 
-	if (adapter->offloads.rx_csum_supported)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
-			DEV_RX_OFFLOAD_UDP_CKSUM  |
-			DEV_RX_OFFLOAD_TCP_CKSUM;
+	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+	if (adapter->offloads.rx_offloads &
+	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+		rx_feat |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
 
 	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
+	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
 	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
@@ -2114,45 +2173,60 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t i;
 	struct rte_mbuf *m;
 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+	struct ena_adapter *adapter = tx_ring->adapter;
 	struct rte_ipv4_hdr *ip_hdr;
 	uint64_t ol_flags;
+	uint64_t l4_csum_flag;
+	uint64_t dev_offload_capa;
 	uint16_t frag_field;
+	bool need_pseudo_csum;
 
+	dev_offload_capa = adapter->offloads.tx_offloads;
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
 		ol_flags = m->ol_flags;
 
-		if (!(ol_flags & PKT_TX_IPV4))
+		/* Check if any offload flag was set */
+		if (ol_flags == 0)
 			continue;
 
-		/* If there was not L2 header length specified, assume it is
-		 * length of the ethernet header.
-		 */
-		if (unlikely(m->l2_len == 0))
-			m->l2_len = sizeof(struct rte_ether_hdr);
-
-		ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
-						 m->l2_len);
-		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
-
-		if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
-			m->packet_type |= RTE_PTYPE_L4_NONFRAG;
-
-			/* If IPv4 header has DF flag enabled and TSO support is
-			 * disabled, partial chcecksum should not be calculated.
-			 */
-			if (!tx_ring->adapter->offloads.tso4_supported)
-				continue;
-		}
-
-		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
-				(ol_flags & PKT_TX_L4_MASK) ==
-				PKT_TX_SCTP_CKSUM) {
+		l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
+		/* SCTP checksum offload is not supported by the ENA. */
+		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
+		    l4_csum_flag == PKT_TX_SCTP_CKSUM) {
+			PMD_TX_LOG(DEBUG,
+				"mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
+				i, ol_flags);
 			rte_errno = ENOTSUP;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		/* Check if requested offload is also enabled for the queue */
+		if ((ol_flags & PKT_TX_IP_CKSUM &&
+		     !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
+		    (l4_csum_flag == PKT_TX_TCP_CKSUM &&
+		     !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
+		    (l4_csum_flag == PKT_TX_UDP_CKSUM &&
+		     !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+			PMD_TX_LOG(DEBUG,
+				"mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
+				i, m->nb_segs, tx_ring->id);
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		/* The caller is obligated to set l2 and l3 len if any cksum
+		 * offload is enabled.
+		 */
+		if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
+		    (m->l2_len == 0 || m->l3_len == 0))) {
+			PMD_TX_LOG(DEBUG,
+				"mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
+				i);
+			rte_errno = EINVAL;
+			return i;
+		}
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
 			rte_errno = -ret;
@@ -2160,16 +2234,76 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 #endif
 
-		/* In case we are supposed to TSO and have DF not set (DF=0)
-		 * hardware must be provided with partial checksum, otherwise
-		 * it will take care of necessary calculations.
+		/* Verify HW support for requested offloads and determine if
+		 * pseudo header checksum is needed.
 		 */
+		need_pseudo_csum = false;
+		if (ol_flags & PKT_TX_IPV4) {
+			if (ol_flags & PKT_TX_IP_CKSUM &&
+			    !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
+				rte_errno = ENOTSUP;
+				return i;
+			}
 
-		ret = rte_net_intel_cksum_flags_prepare(m,
-			ol_flags & ~PKT_TX_TCP_SEG);
-		if (ret != 0) {
-			rte_errno = -ret;
-			return i;
+			if (ol_flags & PKT_TX_TCP_SEG &&
+			    !(dev_offload_capa & ENA_IPV4_TSO)) {
+				rte_errno = ENOTSUP;
+				return i;
+			}
+
+			/* Check HW capabilities and if pseudo csum is needed
+			 * for L4 offloads.
+			 */
+			if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+			    !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
+				if (dev_offload_capa &
+				    ENA_L4_IPV4_CSUM_PARTIAL) {
+					need_pseudo_csum = true;
+				} else {
+					rte_errno = ENOTSUP;
+					return i;
+				}
+			}
+
+			/* Parse the DF flag */
+			ip_hdr = rte_pktmbuf_mtod_offset(m,
+				struct rte_ipv4_hdr *, m->l2_len);
+			frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+			if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
+				m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+			} else if (ol_flags & PKT_TX_TCP_SEG) {
+				/* In case we are supposed to TSO and have DF
+				 * not set (DF=0) hardware must be provided with
+				 * partial checksum.
+				 */
+				need_pseudo_csum = true;
+			}
+		} else if (ol_flags & PKT_TX_IPV6) {
+			/* There is no support for IPv6 TSO as for now. */
+			if (ol_flags & PKT_TX_TCP_SEG) {
+				rte_errno = ENOTSUP;
+				return i;
+			}
+
+			/* Check HW capabilities and if pseudo csum is needed */
+			if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+			    !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
+				if (dev_offload_capa &
+				    ENA_L4_IPV6_CSUM_PARTIAL) {
+					need_pseudo_csum = true;
+				} else {
+					rte_errno = ENOTSUP;
+					return i;
+				}
+			}
+		}
+
+		if (need_pseudo_csum) {
+			ret = rte_net_intel_cksum_flags_prepare(m, ol_flags);
+			if (ret != 0) {
+				rte_errno = -ret;
+				return i;
+			}
 		}
 	}
 
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index af5eeea28..a5b41b30f 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -142,9 +142,8 @@ struct ena_stats_dev {
 };
 
 struct ena_offloads {
-	bool tso4_supported;
-	bool tx_csum_supported;
-	bool rx_csum_supported;
+	uint32_t tx_offloads;
+	uint32_t rx_offloads;
 };
 
 /* board specific private data structure */
-- 
2.25.1



* [PATCH 19.11 2/4] net/ena: fix per-queue offload capabilities
  2021-12-03 15:08 [PATCH 19.11 1/4] net/ena: fix offload capabilities verification Michal Krawczyk
@ 2021-12-03 15:08 ` Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 3/4] net/ena: advertise scattered Rx capability Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure" Michal Krawczyk
  2 siblings, 0 replies; 5+ messages in thread
From: Michal Krawczyk @ 2021-12-03 15:08 UTC (permalink / raw)
  To: stable; +Cc: christian.ehrhardt, Michal Krawczyk, Igor Chauskin, Shai Brandes

[ upstream commit 3a822d79c5da8ed65fd08a8188b9b7d4c35fe199 ]

As ENA currently doesn't support offloads which could be configured
per-queue, only per-port flags should be set [1].

In addition, to make the code cleaner, parsing of the appropriate
offload flags is encapsulated into helper functions, in a similar
manner to what the other PMDs do.

[1] https://doc.dpdk.org/guides/prog_guide/
    poll_mode_drv.html?highlight=offloads#hardware-offload
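
For illustration only (assumed application code, not part of this
patch): per-port offloads are requested through rte_eth_conf, while
anything passed in the queue-level offloads field must fit within
rx/tx_queue_offload_capa, which this patch now reports as 0 for ENA:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static int
configure_ena_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf;
        struct rte_eth_rxconf rxconf;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Per-port offloads: request only what the port advertises. */
        memset(&conf, 0, sizeof(conf));
        conf.rxmode.offloads = dev_info.rx_offload_capa &
                (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
        conf.txmode.offloads = dev_info.tx_offload_capa &
                DEV_TX_OFFLOAD_TCP_CKSUM;

        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret != 0)
                return ret;

        /* Per-queue offloads must be a subset of rx_queue_offload_capa,
         * which ENA now reports as 0 - so request none here.
         */
        rxconf = dev_info.default_rxconf;
        rxconf.offloads = 0;

        return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
                                      &rxconf, mb_pool);
}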

Fixes: 7369f88f88c0 ("net/ena: convert to new Rx offloads API")
Fixes: 56b8b9b7e5d2 ("net/ena: convert to new Tx offloads API")
Cc: stable@dpdk.org

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shai Brandes <shaibran@amazon.com>
---
 drivers/net/ena/ena_ethdev.c | 91 ++++++++++++++++++++++++------------
 1 file changed, 62 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a00d29aa1..5f15d55d4 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -229,6 +229,10 @@ static int ena_queue_start(struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
 			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter);
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter);
 static int ena_infos_get(struct rte_eth_dev *dev,
 			 struct rte_eth_dev_info *dev_info);
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
@@ -1965,12 +1969,65 @@ static void ena_init_rings(struct ena_adapter *adapter)
 	}
 }
 
+static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+	if (adapter->offloads.rx_offloads &
+	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
+		port_offloads |=
+			DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
+		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	port_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
+{
+	uint64_t port_offloads = 0;
+
+	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
+		port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
+		port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+	if (adapter->offloads.tx_offloads &
+	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
+	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
+		port_offloads |=
+			DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	return port_offloads;
+}
+
+static uint64_t ena_get_rx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
+static uint64_t ena_get_tx_queue_offloads(struct ena_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+
+	return 0;
+}
+
 static int ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info)
 {
 	struct ena_adapter *adapter;
 	struct ena_com_dev *ena_dev;
-	uint64_t rx_feat = 0, tx_feat = 0;
 
 	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
 	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
@@ -1989,33 +2046,11 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_50G  |
 			ETH_LINK_SPEED_100G;
 
-	/* Set Tx & Rx features available for device */
-	if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.tx_offloads &
-	    (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM |
-	     ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
-		tx_feat |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
-
-	if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM;
-	if (adapter->offloads.rx_offloads &
-	    (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
-		rx_feat |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
-
-	rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-	tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
 	/* Inform framework about available features */
-	dev_info->rx_offload_capa = rx_feat;
-	if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
-	dev_info->rx_queue_offload_capa = rx_feat;
-	dev_info->tx_offload_capa = tx_feat;
-	dev_info->tx_queue_offload_capa = tx_feat;
+	dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
+	dev_info->tx_offload_capa = ena_get_tx_port_offloads(adapter);
+	dev_info->rx_queue_offload_capa = ena_get_rx_queue_offloads(adapter);
+	dev_info->tx_queue_offload_capa = ena_get_tx_queue_offloads(adapter);
 
 	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
 					   ETH_RSS_UDP;
@@ -2028,8 +2063,6 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 	dev_info->max_tx_queues = adapter->num_queues;
 	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
 
-	adapter->tx_supported_offloads = tx_feat;
-	adapter->rx_supported_offloads = rx_feat;
 
 	dev_info->rx_desc_lim.nb_max = adapter->rx_ring_size;
 	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
-- 
2.25.1



* [PATCH 19.11 3/4] net/ena: advertise scattered Rx capability
  2021-12-03 15:08 [PATCH 19.11 1/4] net/ena: fix offload capabilities verification Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 2/4] net/ena: fix per-queue offload capabilities Michal Krawczyk
@ 2021-12-03 15:08 ` Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure" Michal Krawczyk
  2 siblings, 0 replies; 5+ messages in thread
From: Michal Krawczyk @ 2021-12-03 15:08 UTC (permalink / raw)
  To: stable; +Cc: christian.ehrhardt, Michal Krawczyk, Igor Chauskin, Shai Brandes

[ upstream commit e2a6d08bef489215ebb77b1d3033875ada757cfa ]

ENA can't be forced to always use a single descriptor for an Rx packet.
Even if the provided buffer size is big enough to hold the data, we
can't assume that the HW won't use an extra descriptor because of
internal optimizations. This assumption may hold, but only for some FW
revisions, which may differ depending on the AWS instance type in use.

As scattered Rx support already exists on the Rx path, the driver just
needs to advertise the DEV_RX_OFFLOAD_SCATTER capability and turn on
the rte_eth_dev_data::scattered_rx option.
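
For illustration (assumed application code, not part of this patch):
with scattered Rx a received frame may arrive as a chain of mbuf
segments, so the application walks the chain instead of assuming the
whole frame sits in the first segment:

#include <rte_mbuf.h>

static uint32_t
count_rx_bytes(const struct rte_mbuf *m)
{
        const struct rte_mbuf *seg;
        uint32_t bytes = 0;

        for (seg = m; seg != NULL; seg = seg->next)
                bytes += seg->data_len; /* length of this segment only */

        /* For a well-formed chain this equals m->pkt_len. */
        return bytes;
}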

Fixes: 1173fca25af9 ("ena: add polling-mode driver")
Cc: stable@dpdk.org

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
Reviewed-by: Igor Chauskin <igorch@amazon.com>
Reviewed-by: Shai Brandes <shaibran@amazon.com>
---
 drivers/net/ena/ena_ethdev.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 5f15d55d4..62c15f3c9 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1937,8 +1937,14 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
 
+	/* Scattered Rx cannot be turned off in the HW, so this capability must
+	 * be forced.
+	 */
+	dev->data->scattered_rx = 1;
+
 	adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
 	adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+
 	return 0;
 }
 
@@ -1985,6 +1991,7 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
 		port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
 	port_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	port_offloads |= DEV_RX_OFFLOAD_SCATTER;
 
 	return port_offloads;
 }
-- 
2.25.1



* [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure"
  2021-12-03 15:08 [PATCH 19.11 1/4] net/ena: fix offload capabilities verification Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 2/4] net/ena: fix per-queue offload capabilities Michal Krawczyk
  2021-12-03 15:08 ` [PATCH 19.11 3/4] net/ena: advertise scattered Rx capability Michal Krawczyk
@ 2021-12-03 15:08 ` Michal Krawczyk
  2021-12-06  8:31   ` Christian Ehrhardt
  2 siblings, 1 reply; 5+ messages in thread
From: Michal Krawczyk @ 2021-12-03 15:08 UTC (permalink / raw)
  To: stable; +Cc: christian.ehrhardt, Michal Krawczyk

This reverts commit 91867d8536914374062693fd5fddc08ae267a41f.
The ENA Tx logic in DPDK v19.11 is a bit different from the one the
original commit was backported from (DPDK v21.08). As the calculation
of the available ring size is not very precise, the ena_com_prepare_tx()
function can fail because the ring is full - this is not a fault in the
current logic, so it shouldn't print error logs or trigger a device
reset.
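
For context only (a sketch of assumed application code; the retry
budget is an arbitrary illustrative value): a full Tx ring simply shows
up as a short return count from rte_eth_tx_burst(), which the
application handles by retrying or dropping the remainder - no device
reset is required:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
xmit_all(uint16_t port_id, uint16_t queue_id,
         struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t sent = 0;
        unsigned int retries = 0;

        /* A short return just means the ring was full; retry a few times. */
        while (sent < nb_pkts && retries++ < 100)
                sent += rte_eth_tx_burst(port_id, queue_id,
                                         pkts + sent, nb_pkts - sent);

        /* Drop whatever could not be queued within the retry budget. */
        for (; sent < nb_pkts; sent++)
                rte_pktmbuf_free(pkts[sent]);
}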

Signed-off-by: Michal Krawczyk <mk@semihalf.com>
---
 drivers/net/ena/ena_ethdev.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 62c15f3c9..8f02ecde1 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -2533,12 +2533,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
 					&ena_tx_ctx, &nb_hw_desc);
 		if (unlikely(rc)) {
-			PMD_DRV_LOG(ERR,
-				"Failed to prepare Tx buffers, rc: %d\n", rc);
 			++tx_ring->tx_stats.prepare_ctx_err;
-			tx_ring->adapter->reset_reason =
-				ENA_REGS_RESET_DRIVER_INVALID_STATE;
-			tx_ring->adapter->trigger_reset = true;
 			break;
 		}
 		tx_info->tx_descs = nb_hw_desc;
-- 
2.25.1



* Re: [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure"
  2021-12-03 15:08 ` [PATCH 19.11 4/4] Revert "net/ena: trigger reset on Tx prepare failure" Michal Krawczyk
@ 2021-12-06  8:31   ` Christian Ehrhardt
  0 siblings, 0 replies; 5+ messages in thread
From: Christian Ehrhardt @ 2021-12-06  8:31 UTC (permalink / raw)
  To: Michal Krawczyk; +Cc: stable

On Fri, Dec 3, 2021 at 4:09 PM Michal Krawczyk <mk@semihalf.com> wrote:
>
> This reverts commit 91867d8536914374062693fd5fddc08ae267a41f.
> The ENA Tx logic in DPDK v19.11 is a bit different from the one the
> original commit was backported from (DPDK v21.08). As the calculation
> of the available ring size is not very precise, the ena_com_prepare_tx()
> function can fail because the ring is full - this is not a fault in the
> current logic, so it shouldn't print error logs or trigger a device
> reset.

Yeah, that is in 19.11.10 - thanks for having a deeper look and sending that!
Applied (together with the other three of this series) to the WIP
branch of 19.11.11


> Signed-off-by: Michal Krawczyk <mk@semihalf.com>
> ---
>  drivers/net/ena/ena_ethdev.c | 5 -----
>  1 file changed, 5 deletions(-)
>
> diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
> index 62c15f3c9..8f02ecde1 100644
> --- a/drivers/net/ena/ena_ethdev.c
> +++ b/drivers/net/ena/ena_ethdev.c
> @@ -2533,12 +2533,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>                 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
>                                         &ena_tx_ctx, &nb_hw_desc);
>                 if (unlikely(rc)) {
> -                       PMD_DRV_LOG(ERR,
> -                               "Failed to prepare Tx buffers, rc: %d\n", rc);
>                         ++tx_ring->tx_stats.prepare_ctx_err;
> -                       tx_ring->adapter->reset_reason =
> -                               ENA_REGS_RESET_DRIVER_INVALID_STATE;
> -                       tx_ring->adapter->trigger_reset = true;
>                         break;
>                 }
>                 tx_info->tx_descs = nb_hw_desc;
> --
> 2.25.1
>


-- 
Christian Ehrhardt
Staff Engineer, Ubuntu Server
Canonical Ltd


