From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, stable@dpdk.org, yidingx.zhou@intel.com,
	Mingjin Ye <mingjinx.ye@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v3] net/iavf: add debug checks for mbuf
Date: Mon, 13 Feb 2023 08:31:50 +0000
Message-ID: <20230213083150.295194-1-mingjinx.ye@intel.com>
In-Reply-To: <20230202100350.668809-1-mingjinx.ye@intel.com>

The scalar Tx path could send a malformed mbuf, causing the kernel
driver to fire an MDD (Malicious Driver Detection) event.

This patch fixes the issue by adding mbuf validation to tx_prepare,
enabled by defining the `RTE_ETHDEV_DEBUG_TX` macro. If validation
fails, rte_errno is set to EINVAL and the index of the first failing
packet is returned.
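
As an illustration (not part of the patch), a minimal application-side
sketch of how the failure surfaces; `port_id`, `queue_id`, `pkts` and
`nb_pkts` are hypothetical:

	/* With RTE_ETHDEV_DEBUG_TX defined, tx_prepare stops at the first
	 * malformed mbuf and reports the cause through rte_errno.
	 */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts)
		printf("mbuf %u rejected: %s\n", nb_prep, rte_strerror(rte_errno));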

Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 645 +++++++++++++++++++++++++++++++++++
 1 file changed, 645 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 8d49967538..6a2ddf2aca 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -24,12 +24,55 @@
 #include <rte_ip.h>
 #include <rte_net.h>
 #include <rte_vect.h>
+#include <rte_vxlan.h>
+#include <rte_gtp.h>
+#include <rte_geneve.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
 #include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
+
+#ifndef IPPROTO_IPIP
+#define IPPROTO_IPIP 4
+#endif
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE	47
+#endif
+
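+/* Default tunnel UDP ports assumed by the debug parser (VXLAN-GPE 4790,
+ * GENEVE 6081); ports reconfigured at runtime are not recognized here.
+ */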
+static uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
+static uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
+
+struct simple_gre_hdr {
+	uint16_t flags;
+	uint16_t proto;
+} __rte_packed;
+
+/* structure that caches offload info for the current packet */
+struct offload_info {
+	uint16_t ethertype;
+	uint8_t gso_enable;
+	uint16_t l2_len;
+	uint16_t l3_len;
+	uint16_t l4_len;
+	uint8_t l4_proto;
+	uint8_t is_tunnel;
+	uint16_t outer_ethertype;
+	uint16_t outer_l2_len;
+	uint16_t outer_l3_len;
+	uint8_t outer_l4_proto;
+	uint16_t tso_segsz;
+	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
+};
+
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
 
@@ -2949,6 +2992,600 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	}
 }
 
+/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+	info->l4_proto = ipv4_hdr->next_proto_id;
+
+	/* fill l4_len for TCP (useful for TSO) and UDP */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv4_hdr + info->l3_len);
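+		/* The upper nibble of data_off is the TCP header length in
+		 * 32-bit words, so mask and shift right by 2 to get bytes.
+		 */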
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = sizeof(struct rte_ipv6_hdr);
+	info->l4_proto = ipv6_hdr->proto;
+
+	/* fill l4_len for TCP (useful for TSO) and UDP */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv6_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/*
+ * Parse an Ethernet header to fill the ethertype, l2_len, l3_len and
+ * l4_proto fields. This function recognizes IPv4/IPv6 with optional VLAN
+ * headers. The l4_len field is only set for TCP and UDP (useful for TSO).
+ */
+static inline void
+parse_ethernet(struct rte_ether_hdr *eth_hdr, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+
+	info->l2_len = sizeof(struct rte_ether_hdr);
+	info->ethertype = eth_hdr->ether_type;
+
+	while (info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+	       info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		vlan_hdr = (struct rte_vlan_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		info->l2_len  += sizeof(struct rte_vlan_hdr);
+		info->ethertype = vlan_hdr->eth_proto;
+	}
+
+	switch (info->ethertype) {
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV4):
+		ipv4_hdr = (struct rte_ipv4_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv4(ipv4_hdr, info);
+		break;
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV6):
+		ipv6_hdr = (struct rte_ipv6_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv6(ipv6_hdr, info);
+		break;
+	default:
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+		break;
+	}
+}
+
+/* Fill in outer layers length */
+static inline void
+update_tunnel_outer(struct offload_info *info)
+{
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+}
+
+/*
+ * Parse a GTP protocol header.
+ * Optional fields and the next extension header type are not parsed.
+ */
+static inline void
+parse_gtp(struct rte_udp_hdr *udp_hdr,
+	  struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_gtp_hdr *gtp_hdr;
+	uint8_t gtp_len = sizeof(*gtp_hdr);
+	uint8_t ip_ver;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->src_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPU_UDP_PORT))
+		return;
+
+	update_tunnel_outer(info);
+	info->l2_len = 0;
+
+	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
+		  sizeof(struct rte_udp_hdr));
+
+	/*
+	 * Check the message type. If the message type is 0xff, it is
+	 * a GTP data packet; otherwise, it is a GTP control packet.
+	 */
+	if (gtp_hdr->msg_type == 0xff) {
+		ip_ver = *(uint8_t *)((char *)udp_hdr +
+			 sizeof(struct rte_udp_hdr) +
+			 sizeof(struct rte_gtp_hdr));
+		ip_ver = (ip_ver) & 0xf0;
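+		/* The high nibble is the inner IP version field:
+		 * 0x40 for IPv4, 0x60 for IPv6.
+		 */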
+
+		if (ip_ver == RTE_GTP_TYPE_IPV4) {
+			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+			parse_ipv4(ipv4_hdr, info);
+		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
+			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+			parse_ipv6(ipv6_hdr, info);
+		}
+	} else {
+		info->ethertype = 0;
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+	}
+
+	info->l2_len += RTE_ETHER_GTP_HLEN;
+}
+
+/* Parse a vxlan header */
+static inline void
+parse_vxlan(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+
+	/* Check the UDP destination port; RTE_VXLAN_DEFAULT_PORT (4789) is
+	 * the default VXLAN port (RFC 7348).
+	 */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT))
+		return;
+
+	update_tunnel_outer(info);
+
+	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
+		sizeof(struct rte_udp_hdr) +
+		sizeof(struct rte_vxlan_hdr));
+
+	parse_ethernet(eth_hdr, info);
+	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
+}
+
+/* Parse a vxlan-gpe header */
+static inline void
+parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
+	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(vxlan_gpe_udp_port))
+		return;
+
+	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+
+	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+	    RTE_VXLAN_GPE_TYPE_IPV4) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
+			  vxlan_gpe_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
+}
+
+/* Parse a geneve header */
+static inline void
+parse_geneve(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_geneve_hdr *geneve_hdr;
+	uint16_t geneve_len;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(geneve_udp_port))
+		return;
+
+	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
+	if (!geneve_hdr->proto || geneve_hdr->proto ==
+	    rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_GENEVE_TYPE_ETH)) {
+		update_tunnel_outer(info);
+		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
+			  geneve_len);
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len +=
+		(sizeof(struct rte_udp_hdr) + sizeof(struct rte_geneve_hdr) +
+		geneve_hdr->opt_len * 4);
+}
+
+/* Parse a gre header */
+static inline void
+parse_gre(struct simple_gre_hdr *gre_hdr, struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	uint8_t gre_len = 0;
+
+	gre_len += sizeof(struct simple_gre_hdr);
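+	/* The optional checksum, key and sequence fields add 4 bytes each. */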
+
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_KEY_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_SEQUENCE_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_CHECKSUM_PRESENT))
+		gre_len += GRE_EXT_LEN;
+
+	if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += gre_len;
+}
+
+/* Parse an encapsulated ip or ipv6 header */
+static inline void
+parse_encap_ip(void *encap_ip, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
+	uint8_t ip_version;
+
+	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
+
+	if (ip_version != 4 && ip_version != 6)
+		return;
+
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+
+	if (ip_version == 4) {
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	} else {
+		parse_ipv6(ipv6_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	}
+	info->l2_len = 0;
+}
+
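+/* Cross-check the parsed header lengths against the mbuf Tx offload fields. */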
+static inline int
+check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_l2_len != m->outer_l2_len) {
+			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			"length: %hu, calculated length: %u", m->outer_l2_len,
+			info->outer_l2_len);
+			return -1;
+		}
+		if (info->outer_l3_len != m->outer_l3_len) {
+			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			"length: %hu, calculated length: %u", m->outer_l3_len,
+			info->outer_l3_len);
+			return -1;
+		}
+	}
+
+	if (info->l2_len != m->l2_len) {
+		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l2_len,
+		info->l2_len);
+		return -1;
+	}
+	if (info->l3_len != m->l3_len) {
+		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l3_len,
+		info->l3_len);
+		return -1;
+	}
+	if (info->l4_len != m->l4_len) {
+		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l4_len,
+		info->l4_len);
+		return -1;
+	}
+
+	return 0;
+}
+
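+/* Verify that the IPv4/IPv6 Tx offload flags match the parsed ethertypes. */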
+static inline int
+check_ether_type(struct offload_info *info, struct rte_mbuf *m)
+{
+	int ret = 0;
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
+				ret = -1;
+			}
+		} else if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
+				ret = -1;
+			}
+		}
+	}
+
+	if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV4` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
+			ret = -1;
+		}
+	} else if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV6` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+/* Check whether the parameters of the mbuf are correct. */
+__rte_unused static inline int
+iavf_check_mbuf(struct rte_mbuf *m)
+{
+	struct rte_ether_hdr *eth_hdr;
+	void *l3_hdr = NULL; /* can be IPv4 or IPv6 */
+	struct offload_info info = {0};
+	uint64_t ol_flags = m->ol_flags;
+	uint64_t tunnel_type = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	parse_ethernet(eth_hdr, &info);
+	l3_hdr = (char *)eth_hdr + info.l2_len;
+	if (info.l4_proto == IPPROTO_UDP) {
+		struct rte_udp_hdr *udp_hdr;
+
+		udp_hdr = (struct rte_udp_hdr *)
+			((char *)l3_hdr + info.l3_len);
+		parse_gtp(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan_gpe(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_geneve(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		/* Always keep last. */
+		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type) != 0)) {
+			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+				rte_be_to_cpu_16(udp_hdr->dst_port));
+			return -1;
+		}
+	} else if (info.l4_proto == IPPROTO_GRE) {
+		struct simple_gre_hdr *gre_hdr;
+
+		gre_hdr = (struct simple_gre_hdr *)((char *)l3_hdr +
+			info.l3_len);
+		parse_gre(gre_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
+				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	} else if (info.l4_proto == IPPROTO_IPIP) {
+		void *encap_ip_hdr;
+
+		encap_ip_hdr = (char *)l3_hdr + info.l3_len;
+		parse_encap_ip(encap_ip_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "IPIP tunnel packet, tx offload "
+				"missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
+				PMD_TX_LOG(ERR, "IPIP tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	}
+
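+	/* Non-tunnelled packets fall through to the generic checks. */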
+check_len:
+	if (check_mbuf_len(&info, m) != 0)
+		return -1;
+
+	return check_ether_type(&info, m);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3017,6 +3654,14 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 				return i;
 			}
 		}
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = iavf_check_mbuf(m);
+		if (ret != 0) {
+			rte_errno = EINVAL;
+			return i;
+		}
+#endif
 	}
 
 	return i;
-- 
2.25.1

