From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>, stable@dpdk.org
Subject: [PATCH 01/19] net/txgbe: fix to parse tunnel packets
Date: Mon, 17 Jun 2024 17:53:11 +0800
Message-Id: <20240617095319.16664-2-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.21.0.windows.1
In-Reply-To: <20240617095319.16664-1-jiawenwu@trustnetic.com>
References: <20240617095319.16664-1-jiawenwu@trustnetic.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
List-Id: DPDK patches and discussions

The outer IPv6 tunnel packet was parsed to the wrong packet type. Remove
the default RTE_PTYPE_L2_ETHER and RTE_PTYPE_L3_IPV4 flags for tunnel
packets, and correct the calculation of the tunnel length for GRE and
GENEVE packets.

Fixes: ca46fcd753b1 ("net/txgbe: support Tx with hardware offload")
Fixes: e5ece1f467aa ("net/txgbe: fix VXLAN-GPE packet checksum")
Fixes: 0e32d6edd479 ("net/txgbe: fix packet type to parse from offload flags")
Fixes: 5bbaf75ed6df ("net/txgbe: fix GRE tunnel packet checksum")
Cc: stable@dpdk.org

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
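Note for reviewers: below is a minimal standalone sketch of the tunnel-length
and inner-L2 arithmetic this patch introduces; it is not driver code. Header
sizes are written out as plain constants (UDP = 8, GENEVE base = 8, GRE
base = 4, Ethernet = 14, VLAN tag = 4), htons() stands in for
rte_cpu_to_be_16(), and the helper names and the tun_ptid values are invented
for the example; only the GRE_* masks mirror the ones added by this patch.

/*
 * Standalone illustration only -- not part of the driver.  It mirrors the
 * arithmetic used by txgbe_get_tun_len()/txgbe_parse_tun_ptid() after this
 * patch.  Helper names and the tun_ptid enum are made up for the example;
 * they are not DPDK or txgbe identifiers.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons() stands in for rte_cpu_to_be_16() */

#define GRE_CHECKSUM_PRESENT	0x8000
#define GRE_KEY_PRESENT		0x2000
#define GRE_SEQUENCE_PRESENT	0x1000
#define GRE_EXT_LEN		4
#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT | \
				 GRE_SEQUENCE_PRESENT)

/* GRE: 4-byte base header, plus one 4-byte extension when any of the
 * checksum/key/sequence bits is set in the big-endian flags field. */
static uint8_t gre_tun_len(uint16_t gre_flags_be)
{
	uint8_t tun_len = 4;

	if (gre_flags_be & htons(GRE_SUPPORTED_FIELDS))
		tun_len += GRE_EXT_LEN;
	return tun_len;
}

/* GENEVE: outer UDP header + 8-byte GENEVE base header + options, where
 * opt_len counts 4-byte words and is read from the header that now sits
 * after outer L2 + outer L3 + UDP. */
static uint8_t geneve_tun_len(uint8_t opt_len)
{
	return 8 + 8 + (opt_len << 2);
}

/* The inner L2 length (mbuf l2_len minus the tunnel header) selects the
 * tunnel packet type: no inner Ethernet, inner Ethernet, or inner
 * Ethernet + VLAN, with a fallback for anything else. */
enum tun_ptid { TUN_EI, TUN_EIG, TUN_EIGM, TUN_EIGMV };

static enum tun_ptid parse_tun_ptid(uint64_t l2_len, uint8_t tun_len)
{
	switch (l2_len - tun_len) {
	case 0:
		return TUN_EIG;
	case 14:		/* Ethernet header */
		return TUN_EIGM;
	case 14 + 4:		/* Ethernet header + VLAN tag */
		return TUN_EIGMV;
	default:
		return TUN_EI;
	}
}

int main(void)
{
	uint8_t gre = gre_tun_len(htons(GRE_KEY_PRESENT));
	uint8_t gnv = geneve_tun_len(2);

	printf("GRE with key: tun_len = %u\n", (unsigned int)gre);
	printf("GENEVE, 2 option words: tun_len = %u\n", (unsigned int)gnv);
	printf("inner Ethernet + VLAN -> ptid %d\n",
	       (int)parse_tun_ptid(gre + 14 + 4, gre));
	return 0;
}

Built with any C compiler, it prints a GRE tunnel length of 8 bytes when the
key bit is set and a GENEVE tunnel length of 24 bytes for two option words
(the opt_len << 2 term), then classifies an inner Ethernet + VLAN packet.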
 drivers/net/txgbe/txgbe_rxtx.c | 69 ++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 32 deletions(-)

diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 4b78e68a40..7731ad8491 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -586,26 +586,17 @@ tx_desc_ol_flags_to_ptype(uint64_t oflags)
 	switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_GRENAT;
+		ptype |= RTE_PTYPE_TUNNEL_GRENAT;
 		break;
 	case RTE_MBUF_F_TX_TUNNEL_GRE:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_GRE;
+		ptype |= RTE_PTYPE_TUNNEL_GRE;
 		break;
 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_GENEVE;
-		ptype |= RTE_PTYPE_INNER_L2_ETHER;
+		ptype |= RTE_PTYPE_TUNNEL_GENEVE;
 		break;
 	case RTE_MBUF_F_TX_TUNNEL_IPIP:
 	case RTE_MBUF_F_TX_TUNNEL_IP:
-		ptype |= RTE_PTYPE_L2_ETHER |
-			 RTE_PTYPE_L3_IPV4 |
-			 RTE_PTYPE_TUNNEL_IP;
+		ptype |= RTE_PTYPE_TUNNEL_IP;
 		break;
 	}
 
@@ -689,11 +680,20 @@ txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
 	return 0;
 }
 
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
+
 static inline uint8_t
 txgbe_get_tun_len(struct rte_mbuf *mbuf)
 {
 	struct txgbe_genevehdr genevehdr;
 	const struct txgbe_genevehdr *gh;
+	const struct txgbe_grehdr *grh;
+	struct txgbe_grehdr grehdr;
 	uint8_t tun_len;
 
 	switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
@@ -706,11 +706,16 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf)
 			+ sizeof(struct txgbe_vxlanhdr);
 		break;
 	case RTE_MBUF_F_TX_TUNNEL_GRE:
-		tun_len = sizeof(struct txgbe_nvgrehdr);
+		tun_len = sizeof(struct txgbe_grehdr);
+		grh = rte_pktmbuf_read(mbuf,
+			mbuf->outer_l2_len + mbuf->outer_l3_len,
+			sizeof(grehdr), &grehdr);
+		if (grh->flags & rte_cpu_to_be_16(GRE_SUPPORTED_FIELDS))
+			tun_len += GRE_EXT_LEN;
 		break;
 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
-		gh = rte_pktmbuf_read(mbuf,
-			mbuf->outer_l2_len + mbuf->outer_l3_len,
+		gh = rte_pktmbuf_read(mbuf, mbuf->outer_l2_len +
+			mbuf->outer_l3_len + sizeof(struct txgbe_udphdr),
 			sizeof(genevehdr), &genevehdr);
 		tun_len = sizeof(struct txgbe_udphdr)
 			+ sizeof(struct txgbe_genevehdr)
@@ -724,27 +729,26 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf)
 }
 
 static inline uint8_t
-txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt)
+txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt, uint8_t tun_len)
 {
-	uint64_t l2_vxlan, l2_vxlan_mac, l2_vxlan_mac_vlan;
-	uint64_t l2_gre, l2_gre_mac, l2_gre_mac_vlan;
+	uint64_t inner_l2_len;
 	uint8_t ptid = 0;
 
-	l2_vxlan = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr);
-	l2_vxlan_mac = l2_vxlan + sizeof(struct rte_ether_hdr);
-	l2_vxlan_mac_vlan = l2_vxlan_mac + sizeof(struct rte_vlan_hdr);
+	inner_l2_len = tx_pkt->l2_len - tun_len;
 
-	l2_gre = sizeof(struct txgbe_grehdr);
-	l2_gre_mac = l2_gre + sizeof(struct rte_ether_hdr);
-	l2_gre_mac_vlan = l2_gre_mac + sizeof(struct rte_vlan_hdr);
-
-	if (tx_pkt->l2_len == l2_vxlan || tx_pkt->l2_len == l2_gre)
+	switch (inner_l2_len) {
+	case 0:
 		ptid = TXGBE_PTID_TUN_EIG;
-	else if (tx_pkt->l2_len == l2_vxlan_mac || tx_pkt->l2_len == l2_gre_mac)
+		break;
+	case sizeof(struct rte_ether_hdr):
 		ptid = TXGBE_PTID_TUN_EIGM;
-	else if (tx_pkt->l2_len == l2_vxlan_mac_vlan ||
-			tx_pkt->l2_len == l2_gre_mac_vlan)
+		break;
+	case sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr):
 		ptid = TXGBE_PTID_TUN_EIGMV;
+		break;
+	default:
+		ptid = TXGBE_PTID_TUN_EI;
+	}
 
 	return ptid;
 }
@@ -811,8 +815,6 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
 		if (tx_ol_req) {
 			tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req);
-			if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
-				tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt);
 			tx_offload.l2_len = tx_pkt->l2_len;
 			tx_offload.l3_len = tx_pkt->l3_len;
 			tx_offload.l4_len = tx_pkt->l4_len;
@@ -821,6 +823,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
 			tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
 			tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+			if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
+				tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt,
+						tx_offload.outer_tun_len);
 
 #ifdef RTE_LIB_SECURITY
 			if (use_ipsec) {
-- 
2.27.0