From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>, stable@dpdk.org
Subject: [PATCH 4/8] net/ngbe: fix packet type to parse from offload flags
Date: Wed, 18 Jan 2023 14:00:35 +0800
Message-ID: <20230118060039.3074016-5-jiawenwu@trustnetic.com>
In-Reply-To: <20230118060039.3074016-1-jiawenwu@trustnetic.com>

In some external applications, developers may fill in a wrong
packet_type in rte_mbuf for transmission, which results in a Tx ring
hang when Tx checksum offload is on. So parse the packet type from
ol_flags instead, and remove the redundant tunnel type handling since
the NIC does not support it.
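
For reference, this is roughly how an application is expected to request
the offload on the Tx side using only the generic rte_mbuf fields (a
minimal sketch for the IPv4/TCP case; the helper name
app_prepare_tx_cksum() is illustrative and not part of this patch):

  #include <rte_mbuf.h>
  #include <rte_ether.h>
  #include <rte_ip.h>
  #include <rte_tcp.h>

  /* Illustrative helper: request IPv4/TCP Tx checksum offload via
   * ol_flags. With this patch the PMD derives the packet type from
   * these flags, so mbuf->packet_type no longer needs to be filled
   * in for Tx.
   */
  static void
  app_prepare_tx_cksum(struct rte_mbuf *m)
  {
          m->l2_len = sizeof(struct rte_ether_hdr);
          m->l3_len = sizeof(struct rte_ipv4_hdr);
          m->l4_len = sizeof(struct rte_tcp_hdr);
          m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
                         RTE_MBUF_F_TX_IP_CKSUM |
                         RTE_MBUF_F_TX_TCP_CKSUM;
  }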

Fixes: 9f3206140274 ("net/ngbe: support TSO")
Cc: stable@dpdk.org

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/ngbe/ngbe_rxtx.c | 87 +++++++++---------------------------
 1 file changed, 20 insertions(+), 67 deletions(-)

diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 9fd24fa444..09312cf40d 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -24,15 +24,11 @@
 
 /* Bit Mask to indicate what bits required for building Tx context */
 static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
-		RTE_MBUF_F_TX_OUTER_IPV6 |
-		RTE_MBUF_F_TX_OUTER_IPV4 |
 		RTE_MBUF_F_TX_IPV6 |
 		RTE_MBUF_F_TX_IPV4 |
 		RTE_MBUF_F_TX_VLAN |
 		RTE_MBUF_F_TX_L4_MASK |
 		RTE_MBUF_F_TX_TCP_SEG |
-		RTE_MBUF_F_TX_TUNNEL_MASK |
-		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
 		NGBE_TX_IEEE1588_TMST);
 
 #define NGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -333,34 +329,15 @@ ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
 	}
 
 	vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
-
-	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-		tx_offload_mask.outer_tun_len |= ~0;
-		tx_offload_mask.outer_l2_len |= ~0;
-		tx_offload_mask.outer_l3_len |= ~0;
-		tx_offload_mask.l2_len |= ~0;
-		tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
-		tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
-
-		switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-		case RTE_MBUF_F_TX_TUNNEL_IPIP:
-			/* for non UDP / GRE tunneling, set to 0b */
-			break;
-		default:
-			PMD_TX_LOG(ERR, "Tunnel type not supported");
-			return;
-		}
-		vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
-	} else {
-		tunnel_seed = 0;
-		vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
-	}
+	vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
 
 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
 		tx_offload_mask.vlan_tci |= ~0;
 		vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
 	}
 
+	tunnel_seed = 0;
+
 	txq->ctx_cache[ctx_idx].flags = ol_flags;
 	txq->ctx_cache[ctx_idx].tx_offload.data[0] =
 		tx_offload_mask.data[0] & tx_offload.data[0];
@@ -449,16 +426,10 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 	return cmdtype;
 }
 
-static inline uint8_t
-tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+static inline uint32_t
+tx_desc_ol_flags_to_ptype(uint64_t oflags)
 {
-	bool tun;
-
-	if (ptype)
-		return ngbe_encode_ptype(ptype);
-
-	/* Only support flags in NGBE_TX_OFFLOAD_MASK */
-	tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
+	uint32_t ptype;
 
 	/* L2 level */
 	ptype = RTE_PTYPE_L2_ETHER;
@@ -466,41 +437,34 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
 		ptype |= RTE_PTYPE_L2_ETHER_VLAN;
 
 	/* L3 level */
-	if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
-		ptype |= RTE_PTYPE_L3_IPV4;
-	else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
-		ptype |= RTE_PTYPE_L3_IPV6;
-
 	if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
-		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+		ptype |= RTE_PTYPE_L3_IPV4;
 	else if (oflags & (RTE_MBUF_F_TX_IPV6))
-		ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+		ptype |= RTE_PTYPE_L3_IPV6;
 
 	/* L4 level */
 	switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
 	case RTE_MBUF_F_TX_TCP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+		ptype |= RTE_PTYPE_L4_TCP;
 		break;
 	case RTE_MBUF_F_TX_UDP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+		ptype |= RTE_PTYPE_L4_UDP;
 		break;
 	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+		ptype |= RTE_PTYPE_L4_SCTP;
 		break;
 	}
 
 	if (oflags & RTE_MBUF_F_TX_TCP_SEG)
-		ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
-
-	/* Tunnel */
-	switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-	case RTE_MBUF_F_TX_TUNNEL_IPIP:
-	case RTE_MBUF_F_TX_TUNNEL_IP:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_IP;
-		break;
-	}
+		ptype |= RTE_PTYPE_L4_TCP;
+
+	return ptype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+	ptype = tx_desc_ol_flags_to_ptype(oflags);
 
 	return ngbe_encode_ptype(ptype);
 }
@@ -629,9 +593,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.l4_len = tx_pkt->l4_len;
 			tx_offload.vlan_tci = tx_pkt->vlan_tci;
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
-			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
-			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
-			tx_offload.outer_tun_len = 0;
 
 			/* If new context need be built or reuse the exist ctx*/
 			ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
@@ -752,10 +713,6 @@ ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				 */
 				pkt_len -= (tx_offload.l2_len +
 					tx_offload.l3_len + tx_offload.l4_len);
-				pkt_len -=
-					(tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-					? tx_offload.outer_l2_len +
-					  tx_offload.outer_l3_len : 0;
 			}
 
 			/*
@@ -1939,12 +1896,8 @@ ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 		RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM  |
-		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 		RTE_ETH_TX_OFFLOAD_TCP_TSO     |
 		RTE_ETH_TX_OFFLOAD_UDP_TSO	   |
-		RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO	|
-		RTE_ETH_TX_OFFLOAD_IP_TNL_TSO	|
-		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO	|
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	if (hw->is_pf)
-- 
2.27.0

