patches for DPDK stable branches
* [PATCH] net/iavf: add check for mbuf
@ 2023-01-31 10:06 Mingjin Ye
  2023-02-02 10:03 ` [PATCH v2] " Mingjin Ye
  0 siblings, 1 reply; 6+ messages in thread
From: Mingjin Ye @ 2023-01-31 10:06 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, yidingx.zhou, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

The scalar Tx path could send a malformed mbuf, which causes the kernel
driver to fire an MDD event.

This patch adds mbuf checks to tx_prepare to fix this issue. If the
verification fails, rte_errno is set to EINVAL and the function returns.
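
As a reference, here is a minimal sketch (illustrative only, not part of
this patch) of how an application sees the failure through the generic
tx_prepare path; port_id, queue_id, pkts[] and nb_pkts are assumed to be
set up by the application as usual:

    #include <rte_ethdev.h>
    #include <rte_errno.h>

    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
    if (nb_prep != nb_pkts && rte_errno == EINVAL) {
        /* pkts[nb_prep] carries inconsistent l2/l3/l4 lengths or
         * ol_flags; fix or drop it before calling rte_eth_tx_burst() */
    }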

Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 599 +++++++++++++++++++++++++++++++++++
 1 file changed, 599 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 8d49967538..63012fdc28 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -24,12 +24,55 @@
 #include <rte_ip.h>
 #include <rte_net.h>
 #include <rte_vect.h>
+#include <rte_vxlan.h>
+#include <rte_gtp.h>
+#include <rte_geneve.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
 #include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
+
+#ifndef IPPROTO_IPIP
+#define IPPROTO_IPIP 4
+#endif
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE	47
+#endif
+
+static uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
+static uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
+
+struct simple_gre_hdr {
+	uint16_t flags;
+	uint16_t proto;
+} __rte_packed;
+
+/* structure that caches offload info for the current packet */
+struct offload_info {
+	uint16_t ethertype;
+	uint8_t gso_enable;
+	uint16_t l2_len;
+	uint16_t l3_len;
+	uint16_t l4_len;
+	uint8_t l4_proto;
+	uint8_t is_tunnel;
+	uint16_t outer_ethertype;
+	uint16_t outer_l2_len;
+	uint16_t outer_l3_len;
+	uint8_t outer_l4_proto;
+	uint16_t tso_segsz;
+	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
+};
+
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
 
@@ -2949,6 +2992,555 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	}
 }
 
+/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+	info->l4_proto = ipv4_hdr->next_proto_id;
+
+	/* fill l4_len for TCP and UDP; it's mainly useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv4_hdr + info->l3_len);
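+		/* upper nibble of data_off = TCP header length in 32-bit words */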
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = sizeof(struct rte_ipv6_hdr);
+	info->l4_proto = ipv6_hdr->proto;
+
+	/* fill l4_len for TCP and UDP; it's mainly useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv6_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/*
+ * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
+ * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
+ * headers. The l4_len field is only set for TCP and UDP (useful for TSO).
+ */
+static inline void
+parse_ethernet(struct rte_ether_hdr *eth_hdr, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+
+	info->l2_len = sizeof(struct rte_ether_hdr);
+	info->ethertype = eth_hdr->ether_type;
+
+	while (info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+	       info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		vlan_hdr = (struct rte_vlan_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		info->l2_len  += sizeof(struct rte_vlan_hdr);
+		info->ethertype = vlan_hdr->eth_proto;
+	}
+
+	switch (info->ethertype) {
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV4):
+		ipv4_hdr = (struct rte_ipv4_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv4(ipv4_hdr, info);
+		break;
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV6):
+		ipv6_hdr = (struct rte_ipv6_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv6(ipv6_hdr, info);
+		break;
+	default:
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+		break;
+	}
+}
+
+/* Fill in outer layers length */
+static inline void
+update_tunnel_outer(struct offload_info *info)
+{
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+}
+
+/*
+ * Parse a GTP protocol header.
+ * Assumes no optional fields and no next extension header type.
+ */
+static inline void
+parse_gtp(struct rte_udp_hdr *udp_hdr,
+	  struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_gtp_hdr *gtp_hdr;
+	uint8_t gtp_len = sizeof(*gtp_hdr);
+	uint8_t ip_ver;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->src_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPU_UDP_PORT))
+		return;
+
+	update_tunnel_outer(info);
+	info->l2_len = 0;
+
+	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
+		  sizeof(struct rte_udp_hdr));
+
+	/*
+	 * Check message type. If message type is 0xff, it is
+	 * a GTP data packet. If not, it is a GTP control packet
+	 */
+	if (gtp_hdr->msg_type == 0xff) {
+		ip_ver = *(uint8_t *)((char *)udp_hdr +
+			 sizeof(struct rte_udp_hdr) +
+			 sizeof(struct rte_gtp_hdr));
+		ip_ver = (ip_ver) & 0xf0;
+
+		if (ip_ver == RTE_GTP_TYPE_IPV4) {
+			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+			parse_ipv4(ipv4_hdr, info);
+		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
+			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+			parse_ipv6(ipv6_hdr, info);
+		}
+	} else {
+		info->ethertype = 0;
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+	}
+
+	info->l2_len += RTE_ETHER_GTP_HLEN;
+}
+
+/* Parse a vxlan header */
+static inline void
+parse_vxlan(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+
+	/* check udp destination port: RTE_VXLAN_DEFAULT_PORT (4789) is the
+	 * default vxlan port (rfc7348); packets on other ports are not
+	 * treated as vxlan here
+	 */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT))
+		return;
+
+	update_tunnel_outer(info);
+
+	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
+		sizeof(struct rte_udp_hdr) +
+		sizeof(struct rte_vxlan_hdr));
+
+	parse_ethernet(eth_hdr, info);
+	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
+}
+
+/* Parse a vxlan-gpe header */
+static inline void
+parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
+	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(vxlan_gpe_udp_port))
+		return;
+
+	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+
+	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+	    RTE_VXLAN_GPE_TYPE_IPV4) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
+			  vxlan_gpe_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
+}
+
+/* Parse a geneve header */
+static inline void
+parse_geneve(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_geneve_hdr *geneve_hdr;
+	uint16_t geneve_len;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(geneve_udp_port))
+		return;
+
+	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
+	if (!geneve_hdr->proto || geneve_hdr->proto ==
+	    rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_GENEVE_TYPE_ETH)) {
+		update_tunnel_outer(info);
+		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
+			  geneve_len);
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	/* add udp + geneve header (including options) */
+	info->l2_len += sizeof(struct rte_udp_hdr) + geneve_len;
+}
+
+/* Parse a gre header */
+static inline void
+parse_gre(struct simple_gre_hdr *gre_hdr, struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	uint8_t gre_len = 0;
+
+	gre_len += sizeof(struct simple_gre_hdr);
+
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_KEY_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_SEQUENCE_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_CHECKSUM_PRESENT))
+		gre_len += GRE_EXT_LEN;
+
+	if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += gre_len;
+}
+
+/* Parse an encapsulated ip or ipv6 header */
+static inline void
+parse_encap_ip(void *encap_ip, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
+	uint8_t ip_version;
+
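+	/* the IP version nibble is at the same offset in IPv4 and IPv6 */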
+	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
+
+	if (ip_version != 4 && ip_version != 6)
+		return;
+
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+
+	if (ip_version == 4) {
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	} else {
+		parse_ipv6(ipv6_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	}
+	info->l2_len = 0;
+}
+
+static inline int
+check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_l2_len != m->outer_l2_len) {
+			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			"length: %hu, calculated length: %u", m->outer_l2_len,
+			info->outer_l2_len);
+			return -1;
+		}
+		if (info->outer_l3_len != m->outer_l3_len) {
+			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			"length: %hu,calculated length: %u", m->outer_l3_len,
+			info->outer_l3_len);
+			return -1;
+		}
+	}
+
+	if (info->l2_len != m->l2_len) {
+		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l2_len,
+		info->l2_len);
+		return -1;
+	}
+	if (info->l3_len != m->l3_len) {
+		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l3_len,
+		info->l3_len);
+		return -1;
+	}
+	if (info->l4_len != m->l4_len) {
+		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l4_len,
+		info->l4_len);
+		return -1;
+	}
+
+	return 0;
+}
+
+static inline int
+check_ether_type(struct offload_info *info, struct rte_mbuf *m)
+{
+	int ret = 0;
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
+				ret = -1;
+			}
+		} else if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
+				ret = -1;
+			}
+		}
+	}
+
+	if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV4` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
+			ret = -1;
+		}
+	} else if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV6` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+/* Check whether the parameters of the mbuf are correct. */
+__rte_unused static inline int
+iavf_check_mbuf(struct rte_mbuf *m)
+{
+	struct rte_ether_hdr *eth_hdr;
+	void *l3_hdr = NULL; /* can be IPv4 or IPv6 */
+	struct offload_info info = {0};
+	uint64_t ol_flags = m->ol_flags;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	parse_ethernet(eth_hdr, &info);
+	l3_hdr = (char *)eth_hdr + info.l2_len;
+	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info.l4_proto == IPPROTO_UDP) {
+			struct rte_udp_hdr *udp_hdr;
+
+			udp_hdr = (struct rte_udp_hdr *)
+				((char *)l3_hdr + info.l3_len);
+			parse_gtp(udp_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP)) {
+					PMD_TX_LOG(ERR, "gtp tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+			parse_vxlan_gpe(udp_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) {
+					PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+			parse_vxlan(udp_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+					PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+			parse_geneve(udp_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE)) {
+					PMD_TX_LOG(ERR, "geneve tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+			/* Always keep last. */
+			if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type)
+				!= 0)) {
+				PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+					rte_be_to_cpu_16(udp_hdr->dst_port));
+				return -1;
+			}
+		} else if (info.l4_proto == IPPROTO_GRE) {
+			struct simple_gre_hdr *gre_hdr;
+
+			gre_hdr = (struct simple_gre_hdr *)((char *)l3_hdr +
+				info.l3_len);
+			parse_gre(gre_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE)) {
+					PMD_TX_LOG(ERR, "gre tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+		} else if (info.l4_proto == IPPROTO_IPIP) {
+			void *encap_ip_hdr;
+
+			encap_ip_hdr = (char *)l3_hdr + info.l3_len;
+			parse_encap_ip(encap_ip_hdr, &info);
+			if (info.is_tunnel) {
+				if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP)) {
+					PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx flag");
+					return -1;
+				}
+				goto check_len;
+			}
+		}
+	}
+
+check_len:
+	if (check_mbuf_len(&info, m) != 0)
+		return -1;
+
+	return check_ether_type(&info, m);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3017,6 +3609,13 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 				return i;
 			}
 		}
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = iavf_check_mbuf(m);
+		if (ret != 0) {
+			rte_errno = EINVAL;
+			return i;
+		}
+#endif
 	}
 
 	return i;
-- 
2.25.1



* [PATCH v2] net/iavf: add check for mbuf
  2023-01-31 10:06 [PATCH] net/iavf: add check for mbuf Mingjin Ye
@ 2023-02-02 10:03 ` Mingjin Ye
  2023-02-07  8:57   ` Ye, MingjinX
  2023-02-13  8:31   ` [PATCH v3] net/iavf: add debug checks " Mingjin Ye
  0 siblings, 2 replies; 6+ messages in thread
From: Mingjin Ye @ 2023-02-02 10:03 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, stable, yidingx.zhou, Mingjin Ye, Jingjing Wu, Beilei Xing

The scalar Tx path could send a malformed mbuf, which causes the kernel
driver to fire an MDD event.

This patch adds mbuf checks to tx_prepare to fix this issue. If the
verification fails, rte_errno is set to EINVAL and the function returns.
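
Note that the check is only compiled in when RTE_ETHDEV_DEBUG_TX is
defined at build time. As an illustration (not part of this patch), the
following sketch shows mbuf metadata for an Ethernet/IPv4/UDP/VXLAN/
Ethernet/IPv4/TCP packet that the new check_mbuf_len() and
check_ether_type() helpers accept; `m' is an assumed, fully built mbuf:

    m->outer_l2_len = sizeof(struct rte_ether_hdr);
    m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
    /* inner l2_len counts the udp + vxlan headers plus the inner
     * ethernet header, matching what parse_vxlan() computes */
    m->l2_len = RTE_ETHER_VXLAN_HLEN + sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);
    m->l4_len = sizeof(struct rte_tcp_hdr); /* TCP without options */
    m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |
                   RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_IPV4;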

Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 647 +++++++++++++++++++++++++++++++++++
 1 file changed, 647 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 8d49967538..93138edf01 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -24,12 +24,55 @@
 #include <rte_ip.h>
 #include <rte_net.h>
 #include <rte_vect.h>
+#include <rte_vxlan.h>
+#include <rte_gtp.h>
+#include <rte_geneve.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
 #include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
+
+#ifndef IPPROTO_IPIP
+#define IPPROTO_IPIP 4
+#endif
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE	47
+#endif
+
+static uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
+static uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
+
+struct simple_gre_hdr {
+	uint16_t flags;
+	uint16_t proto;
+} __rte_packed;
+
+/* structure that caches offload info for the current packet */
+struct offload_info {
+	uint16_t ethertype;
+	uint8_t gso_enable;
+	uint16_t l2_len;
+	uint16_t l3_len;
+	uint16_t l4_len;
+	uint8_t l4_proto;
+	uint8_t is_tunnel;
+	uint16_t outer_ethertype;
+	uint16_t outer_l2_len;
+	uint16_t outer_l3_len;
+	uint8_t outer_l4_proto;
+	uint16_t tso_segsz;
+	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
+};
+
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
 
@@ -2949,6 +2992,603 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	}
 }
 
+/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+	info->l4_proto = ipv4_hdr->next_proto_id;
+
+	/* fill l4_len for TCP and UDP; it's mainly useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv4_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = sizeof(struct rte_ipv6_hdr);
+	info->l4_proto = ipv6_hdr->proto;
+
+	/* fill l4_len for TCP and UDP; it's mainly useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv6_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/*
+ * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
+ * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
+ * headers. The l4_len field is only set for TCP and UDP (useful for TSO).
+ */
+static inline void
+parse_ethernet(struct rte_ether_hdr *eth_hdr, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+
+	info->l2_len = sizeof(struct rte_ether_hdr);
+	info->ethertype = eth_hdr->ether_type;
+
+	while (info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+	       info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		vlan_hdr = (struct rte_vlan_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		info->l2_len  += sizeof(struct rte_vlan_hdr);
+		info->ethertype = vlan_hdr->eth_proto;
+	}
+
+	switch (info->ethertype) {
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV4):
+		ipv4_hdr = (struct rte_ipv4_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv4(ipv4_hdr, info);
+		break;
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV6):
+		ipv6_hdr = (struct rte_ipv6_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv6(ipv6_hdr, info);
+		break;
+	default:
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+		break;
+	}
+}
+
+/* Fill in outer layers length */
+static inline void
+update_tunnel_outer(struct offload_info *info)
+{
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+}
+
+/*
+ * Parse a GTP protocol header.
+ * Assumes no optional fields and no next extension header type.
+ */
+static inline void
+parse_gtp(struct rte_udp_hdr *udp_hdr,
+	  struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_gtp_hdr *gtp_hdr;
+	uint8_t gtp_len = sizeof(*gtp_hdr);
+	uint8_t ip_ver;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->src_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPU_UDP_PORT))
+		return;
+
+	update_tunnel_outer(info);
+	info->l2_len = 0;
+
+	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
+		  sizeof(struct rte_udp_hdr));
+
+	/*
+	 * Check message type. If message type is 0xff, it is
+	 * a GTP data packet. If not, it is a GTP control packet
+	 */
+	if (gtp_hdr->msg_type == 0xff) {
+		ip_ver = *(uint8_t *)((char *)udp_hdr +
+			 sizeof(struct rte_udp_hdr) +
+			 sizeof(struct rte_gtp_hdr));
+		ip_ver = (ip_ver) & 0xf0;
+
+		if (ip_ver == RTE_GTP_TYPE_IPV4) {
+			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+			parse_ipv4(ipv4_hdr, info);
+		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
+			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+			parse_ipv6(ipv6_hdr, info);
+		}
+	} else {
+		info->ethertype = 0;
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+	}
+
+	info->l2_len += RTE_ETHER_GTP_HLEN;
+}
+
+/* Parse a vxlan header */
+static inline void
+parse_vxlan(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+
+	/* check udp destination port: RTE_VXLAN_DEFAULT_PORT (4789) is the
+	 * default vxlan port (rfc7348); packets on other ports are not
+	 * treated as vxlan here
+	 */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT))
+		return;
+
+	update_tunnel_outer(info);
+
+	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
+		sizeof(struct rte_udp_hdr) +
+		sizeof(struct rte_vxlan_hdr));
+
+	parse_ethernet(eth_hdr, info);
+	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
+}
+
+/* Parse a vxlan-gpe header */
+static inline void
+parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
+	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(vxlan_gpe_udp_port))
+		return;
+
+	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+
+	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+	    RTE_VXLAN_GPE_TYPE_IPV4) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
+			  vxlan_gpe_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
+}
+
+/* Parse a geneve header */
+static inline void
+parse_geneve(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_geneve_hdr *geneve_hdr;
+	uint16_t geneve_len;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(geneve_udp_port))
+		return;
+
+	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
+	if (!geneve_hdr->proto || geneve_hdr->proto ==
+	    rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_GENEVE_TYPE_ETH)) {
+		update_tunnel_outer(info);
+		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
+			  geneve_len);
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	/* add udp + geneve header (including options) */
+	info->l2_len += sizeof(struct rte_udp_hdr) + geneve_len;
+}
+
+/* Parse a gre header */
+static inline void
+parse_gre(struct simple_gre_hdr *gre_hdr, struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	uint8_t gre_len = 0;
+
+	gre_len += sizeof(struct simple_gre_hdr);
+
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_KEY_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_SEQUENCE_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_CHECKSUM_PRESENT))
+		gre_len += GRE_EXT_LEN;
+
+	if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += gre_len;
+}
+
+/* Parse an encapsulated ip or ipv6 header */
+static inline void
+parse_encap_ip(void *encap_ip, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
+	uint8_t ip_version;
+
+	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
+
+	if (ip_version != 4 && ip_version != 6)
+		return;
+
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+
+	if (ip_version == 4) {
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	} else {
+		parse_ipv6(ipv6_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	}
+	info->l2_len = 0;
+}
+
+static inline int
+check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_l2_len != m->outer_l2_len) {
+			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			"length: %hu, calculated length: %u", m->outer_l2_len,
+			info->outer_l2_len);
+			return -1;
+		}
+		if (info->outer_l3_len != m->outer_l3_len) {
+			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			"length: %hu,calculated length: %u", m->outer_l3_len,
+			info->outer_l3_len);
+			return -1;
+		}
+	}
+
+	if (info->l2_len != m->l2_len) {
+		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l2_len,
+		info->l2_len);
+		return -1;
+	}
+	if (info->l3_len != m->l3_len) {
+		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l3_len,
+		info->l3_len);
+		return -1;
+	}
+	if (info->l4_len != m->l4_len) {
+		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l4_len,
+		info->l4_len);
+		return -1;
+	}
+
+	return 0;
+}
+
+static inline int
+check_ether_type(struct offload_info *info, struct rte_mbuf *m)
+{
+	int ret = 0;
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
+				ret = -1;
+			}
+		} else if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
+				ret = -1;
+			}
+		}
+	}
+
+	if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV4` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
+			ret = -1;
+		}
+	} else if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV6` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+/* Check whether the parameters of the mbuf are correct. */
+__rte_unused static inline int
+iavf_check_mbuf(struct rte_mbuf *m)
+{
+	struct rte_ether_hdr *eth_hdr;
+	void *l3_hdr = NULL; /* can be IPv4 or IPv6 */
+	struct offload_info info = {0};
+	uint64_t ol_flags = m->ol_flags;
+	uint64_t tunnel_type = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	parse_ethernet(eth_hdr, &info);
+	l3_hdr = (char *)eth_hdr + info.l2_len;
+	if (info.l4_proto == IPPROTO_UDP) {
+		struct rte_udp_hdr *udp_hdr;
+
+		udp_hdr = (struct rte_udp_hdr *)
+			((char *)l3_hdr + info.l3_len);
+		parse_gtp(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan_gpe(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_geneve(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		/* Always keep last. */
+		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type)
+			!= 0)) {
+			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+				rte_be_to_cpu_16(udp_hdr->dst_port));
+			return -1;
+		}
+	} else if (info.l4_proto == IPPROTO_GRE) {
+		struct simple_gre_hdr *gre_hdr;
+
+		gre_hdr = (struct simple_gre_hdr *)((char *)l3_hdr +
+			info.l3_len);
+		parse_gre(gre_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gre tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
+				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	} else if (info.l4_proto == IPPROTO_IPIP) {
+		void *encap_ip_hdr;
+
+		encap_ip_hdr = (char *)l3_hdr + info.l3_len;
+		parse_encap_ip(encap_ip_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
+				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	}
+
+check_len:
+	if (check_mbuf_len(&info, m) != 0)
+		return -1;
+
+	return check_ether_type(&info, m);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3017,6 +3657,13 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 				return i;
 			}
 		}
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = iavf_check_mbuf(m);
+		if (ret != 0) {
+			rte_errno = EINVAL;
+			return i;
+		}
+#endif
 	}
 
 	return i;
-- 
2.25.1



* RE: [PATCH v2] net/iavf: add check for mbuf
  2023-02-02 10:03 ` [PATCH v2] " Mingjin Ye
@ 2023-02-07  8:57   ` Ye, MingjinX
  2023-02-13  8:31   ` [PATCH v3] net/iavf: add debug checks " Mingjin Ye
  1 sibling, 0 replies; 6+ messages in thread
From: Ye, MingjinX @ 2023-02-07  8:57 UTC (permalink / raw)
  To: dev, Zhang, Qi Z, Yang, Qiming
  Cc: stable, Zhou, YidingX, Wu, Jingjing, Xing, Beilei, Yang, Guang3

Hi All,

Could you please review and provide any suggestions.

Thanks,
Mingjin

> +			if (!tunnel_type) {
> +				PMD_TX_LOG(ERR, "gre tunnel packet
> missing tx "
> +				"offload missing
> `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
> +				return -1;
> +			}
> +			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
> +				PMD_TX_LOG(ERR, "gre tunnel packet, tx
> offload has "
> +				"wrong `%s` flag, correct is "
> +				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
> +				rte_get_tx_ol_flag_name(tunnel_type));
> +				return -1;
> +			}
> +			goto check_len;
> +		}
> +	} else if (info.l4_proto == IPPROTO_IPIP) {
> +		void *encap_ip_hdr;
> +
> +		encap_ip_hdr = (char *)l3_hdr + info.l3_len;
> +		parse_encap_ip(encap_ip_hdr, &info);
> +		if (info.is_tunnel) {
> +			if (!tunnel_type) {
> +				PMD_TX_LOG(ERR, "Ipip tunnel packet
> missing tx "
> +				"offload missing
> `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
> +				return -1;
> +			}
> +			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
> +				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx
> offload has "
> +				"wrong `%s` flag, correct is "
> +				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
> +				rte_get_tx_ol_flag_name(tunnel_type));
> +				return -1;
> +			}
> +			goto check_len;
> +		}
> +	}
> +
> +
> +
> +check_len:
> +	if (check_mbuf_len(&info, m) != 0)
> +		return -1;
> +
> +	return check_ether_type(&info, m);
> +}
> +
>  /* TX prep functions */
>  uint16_t
>  iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
> @@ -3017,6 +3657,13 @@ iavf_prep_pkts(__rte_unused void *tx_queue,
> struct rte_mbuf **tx_pkts,
>  				return i;
>  			}
>  		}
> +#ifdef RTE_ETHDEV_DEBUG_TX
> +		ret = iavf_check_mbuf(m);
> +		if (ret != 0) {
> +			rte_errno = EINVAL;
> +			return i;
> +		}
> +#endif
>  	}
> 
>  	return i;
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3] net/iavf: add debug checks for mbuf
  2023-02-02 10:03 ` [PATCH v2] " Mingjin Ye
  2023-02-07  8:57   ` Ye, MingjinX
@ 2023-02-13  8:31   ` Mingjin Ye
  2023-02-19  8:30     ` Zhang, Qi Z
  1 sibling, 1 reply; 6+ messages in thread
From: Mingjin Ye @ 2023-02-13  8:31 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, stable, yidingx.zhou, Mingjin Ye, Jingjing Wu, Beilei Xing

The scalar Tx path may send a malformed mbuf, causing the kernel driver
to fire the MDD event.

This patch adds mbuf detection in tx_prepare and enables it by defining
the `RTE_ETHDEV_DEBUG_TX` macro to fix this issue. If validation fails,
rte_errno is set to EINVAL and the number of successfully prepared
packets is returned.
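
For illustration, a minimal sketch of how an application would exercise
this check (the `send_burst()` helper, port/queue IDs, and packet array
are assumptions for the example, not part of this patch):

	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_errno.h>

	/* Run a burst through tx_prepare so the driver's mbuf checks
	 * (including iavf_check_mbuf() when RTE_ETHDEV_DEBUG_TX is
	 * defined) run before transmission. */
	static uint16_t
	send_burst(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
	{
		uint16_t nb_prep;

		nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
		if (nb_prep < nb_pkts)
			/* pkts[nb_prep] failed validation; rte_errno
			 * holds the cause (EINVAL for these checks). */
			printf("tx_prepare stopped at %u: %s\n",
			       nb_prep, rte_strerror(rte_errno));

		return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	}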

Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 645 +++++++++++++++++++++++++++++++++++
 1 file changed, 645 insertions(+)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 8d49967538..6a2ddf2aca 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -24,12 +24,55 @@
 #include <rte_ip.h>
 #include <rte_net.h>
 #include <rte_vect.h>
+#include <rte_vxlan.h>
+#include <rte_gtp.h>
+#include <rte_geneve.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
 #include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
+#define GRE_CHECKSUM_PRESENT	0x8000
+#define GRE_KEY_PRESENT		0x2000
+#define GRE_SEQUENCE_PRESENT	0x1000
+#define GRE_EXT_LEN		4
+#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+				 GRE_SEQUENCE_PRESENT)
+
+#ifndef IPPROTO_IPIP
+#define IPPROTO_IPIP 4
+#endif
+#ifndef IPPROTO_GRE
+#define IPPROTO_GRE	47
+#endif
+
+static uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
+static uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
+
+struct simple_gre_hdr {
+	uint16_t flags;
+	uint16_t proto;
+} __rte_packed;
+
+/* structure that caches offload info for the current packet */
+struct offload_info {
+	uint16_t ethertype;
+	uint8_t gso_enable;
+	uint16_t l2_len;
+	uint16_t l3_len;
+	uint16_t l4_len;
+	uint8_t l4_proto;
+	uint8_t is_tunnel;
+	uint16_t outer_ethertype;
+	uint16_t outer_l2_len;
+	uint16_t outer_l3_len;
+	uint8_t outer_l4_proto;
+	uint16_t tso_segsz;
+	uint16_t tunnel_tso_segsz;
+	uint32_t pkt_len;
+};
+
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
 int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
 
@@ -2949,6 +2992,600 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	}
 }
 
+/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+	info->l4_proto = ipv4_hdr->next_proto_id;
+
+	/* only fill l4_len for TCP, it's useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv4_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
+static inline void
+parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct offload_info *info)
+{
+	struct rte_tcp_hdr *tcp_hdr;
+
+	info->l3_len = sizeof(struct rte_ipv6_hdr);
+	info->l4_proto = ipv6_hdr->proto;
+
+	/* only fill l4_len for TCP, it's useful for TSO */
+	if (info->l4_proto == IPPROTO_TCP) {
+		tcp_hdr = (struct rte_tcp_hdr *)
+			((char *)ipv6_hdr + info->l3_len);
+		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		info->l4_len = sizeof(struct rte_udp_hdr);
+	} else {
+		info->l4_len = 0;
+	}
+}
+
+/*
+ * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
+ * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
+ * headers. The l4_len argument is only set in case of TCP (useful for TSO).
+ */
+static inline void
+parse_ethernet(struct rte_ether_hdr *eth_hdr, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vlan_hdr *vlan_hdr;
+
+	info->l2_len = sizeof(struct rte_ether_hdr);
+	info->ethertype = eth_hdr->ether_type;
+
+	while (info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
+	       info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
+		vlan_hdr = (struct rte_vlan_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		info->l2_len += sizeof(struct rte_vlan_hdr);
+		info->ethertype = vlan_hdr->eth_proto;
+	}
+
+	switch (info->ethertype) {
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV4):
+		ipv4_hdr = (struct rte_ipv4_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv4(ipv4_hdr, info);
+		break;
+	case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV6):
+		ipv6_hdr = (struct rte_ipv6_hdr *)
+			((char *)eth_hdr + info->l2_len);
+		parse_ipv6(ipv6_hdr, info);
+		break;
+	default:
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+		break;
+	}
+}
+
+/* Fill in outer layers length */
+static inline void
+update_tunnel_outer(struct offload_info *info)
+{
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+	info->outer_l4_proto = info->l4_proto;
+}
+
+/*
+ * Parse a GTP protocol header.
+ * No optional fields and next extension header type.
+ */
+static inline void
+parse_gtp(struct rte_udp_hdr *udp_hdr,
+	  struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_gtp_hdr *gtp_hdr;
+	uint8_t gtp_len = sizeof(*gtp_hdr);
+	uint8_t ip_ver;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->src_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) &&
+	    udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPU_UDP_PORT))
+		return;
+
+	update_tunnel_outer(info);
+	info->l2_len = 0;
+
+	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
+		  sizeof(struct rte_udp_hdr));
+
+	/*
+	 * Check message type. If message type is 0xff, it is
+	 * a GTP data packet. If not, it is a GTP control packet
+	 */
+	if (gtp_hdr->msg_type == 0xff) {
+		ip_ver = *(uint8_t *)((char *)udp_hdr +
+			 sizeof(struct rte_udp_hdr) +
+			 sizeof(struct rte_gtp_hdr));
+		ip_ver &= 0xf0;
+
+		if (ip_ver == RTE_GTP_TYPE_IPV4) {
+			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+			parse_ipv4(ipv4_hdr, info);
+		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
+			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
+				   gtp_len);
+			info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+			parse_ipv6(ipv6_hdr, info);
+		}
+	} else {
+		info->ethertype = 0;
+		info->l4_len = 0;
+		info->l3_len = 0;
+		info->l4_proto = 0;
+	}
+
+	info->l2_len += RTE_ETHER_GTP_HLEN;
+}
+
+/* Parse a vxlan header */
+static inline void
+parse_vxlan(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+
+	/* check udp destination port, RTE_VXLAN_DEFAULT_PORT (4789) is the
+	 * default vxlan port (rfc7348)
+	 */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT))
+		return;
+
+	update_tunnel_outer(info);
+
+	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
+		sizeof(struct rte_udp_hdr) +
+		sizeof(struct rte_vxlan_hdr));
+
+	parse_ethernet(eth_hdr, info);
+	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
+}
+
+/* Parse a vxlan-gpe header */
+static inline void
+parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
+	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(vxlan_gpe_udp_port))
+		return;
+
+	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+
+	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+	    RTE_VXLAN_GPE_TYPE_IPV4) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
+			   vxlan_gpe_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
+			  vxlan_gpe_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
+}
+
+/* Parse a geneve header */
+static inline void
+parse_geneve(struct rte_udp_hdr *udp_hdr,
+	    struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	struct rte_geneve_hdr *geneve_hdr;
+	uint16_t geneve_len;
+
+	/* Check udp destination port. */
+	if (udp_hdr->dst_port != rte_cpu_to_be_16(geneve_udp_port))
+		return;
+
+	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
+				sizeof(struct rte_udp_hdr));
+	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
+	if (!geneve_hdr->proto || geneve_hdr->proto ==
+	    rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
+			   geneve_len);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_GENEVE_TYPE_ETH)) {
+		update_tunnel_outer(info);
+		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
+			  geneve_len);
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	/* add udp + geneve header (including options) */
+	info->l2_len += sizeof(struct rte_udp_hdr) + geneve_len;
+}
+
+/* Parse a gre header */
+static inline void
+parse_gre(struct simple_gre_hdr *gre_hdr, struct offload_info *info)
+{
+	struct rte_ether_hdr *eth_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
+	uint8_t gre_len = 0;
+
+	gre_len += sizeof(struct simple_gre_hdr);
+
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_KEY_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_SEQUENCE_PRESENT))
+		gre_len += GRE_EXT_LEN;
+	if (gre_hdr->flags & rte_cpu_to_be_16(GRE_CHECKSUM_PRESENT))
+		gre_len += GRE_EXT_LEN;
+
+	if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		update_tunnel_outer(info);
+
+		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		update_tunnel_outer(info);
+
+		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);
+
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		parse_ipv6(ipv6_hdr, info);
+		info->l2_len = 0;
+
+	} else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
+		update_tunnel_outer(info);
+
+		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);
+
+		parse_ethernet(eth_hdr, info);
+	} else {
+		return;
+	}
+
+	info->l2_len += gre_len;
+}
+
+/* Parse an encapsulated ip or ipv6 header */
+static inline void
+parse_encap_ip(void *encap_ip, struct offload_info *info)
+{
+	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
+	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
+	uint8_t ip_version;
+
+	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;
+
+	if (ip_version != 4 && ip_version != 6)
+		return;
+
+	info->is_tunnel = 1;
+	info->outer_ethertype = info->ethertype;
+	info->outer_l2_len = info->l2_len;
+	info->outer_l3_len = info->l3_len;
+
+	if (ip_version == 4) {
+		parse_ipv4(ipv4_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	} else {
+		parse_ipv6(ipv6_hdr, info);
+		info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	}
+	info->l2_len = 0;
+}
+
+static inline int
+check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_l2_len != m->outer_l2_len) {
+			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			"length: %hu, calculated length: %u", m->outer_l2_len,
+			info->outer_l2_len);
+			return -1;
+		}
+		if (info->outer_l3_len != m->outer_l3_len) {
+			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			"length: %hu,calculated length: %u", m->outer_l3_len,
+			info->outer_l3_len);
+			return -1;
+		}
+	}
+
+	if (info->l2_len != m->l2_len) {
+		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l2_len,
+		info->l2_len);
+		return -1;
+	}
+	if (info->l3_len != m->l3_len) {
+		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l3_len,
+		info->l3_len);
+		return -1;
+	}
+	if (info->l4_len != m->l4_len) {
+		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		"length: %hu, calculated length: %u", m->l4_len,
+		info->l4_len);
+		return -1;
+	}
+
+	return 0;
+}
+
+static inline int
+check_ether_type(struct offload_info *info, struct rte_mbuf *m)
+{
+	int ret = 0;
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+		if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
+				ret = -1;
+			}
+		} else if (info->outer_ethertype ==
+			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
+				ret = -1;
+			}
+			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
+				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
+				ret = -1;
+			}
+		}
+	}
+
+	if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV4` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
+			ret = -1;
+		}
+	} else if (info->ethertype ==
+		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
+		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"missing `RTE_MBUF_F_TX_IPV6` flag.");
+			ret = -1;
+		}
+		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+/* Check whether the parameters of mbuf are correct. */
+__rte_unused static inline int
+iavf_check_mbuf(struct rte_mbuf *m)
+{
+	struct rte_ether_hdr *eth_hdr;
+	void *l3_hdr = NULL; /* can be IPv4 or IPv6 */
+	struct offload_info info = {0};
+	uint64_t ol_flags = m->ol_flags;
+	uint64_t tunnel_type = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+
+	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	parse_ethernet(eth_hdr, &info);
+	l3_hdr = (char *)eth_hdr + info.l2_len;
+	if (info.l4_proto == IPPROTO_UDP) {
+		struct rte_udp_hdr *udp_hdr;
+
+		udp_hdr = (struct rte_udp_hdr *)
+			((char *)l3_hdr + info.l3_len);
+		parse_gtp(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
+				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan_gpe(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
+				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_vxlan(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
+				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		parse_geneve(udp_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
+				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+		/* Always keep last. */
+		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type) != 0)) {
+			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+				rte_be_to_cpu_16(udp_hdr->dst_port));
+			return -1;
+		}
+	} else if (info.l4_proto == IPPROTO_GRE) {
+		struct simple_gre_hdr *gre_hdr;
+
+		gre_hdr = (struct simple_gre_hdr *)((char *)l3_hdr +
+			info.l3_len);
+		parse_gre(gre_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "gre tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
+				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	} else if (info.l4_proto == IPPROTO_IPIP) {
+		void *encap_ip_hdr;
+
+		encap_ip_hdr = (char *)l3_hdr + info.l3_len;
+		parse_encap_ip(encap_ip_hdr, &info);
+		if (info.is_tunnel) {
+			if (!tunnel_type) {
+				PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx "
+				"offload missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
+				return -1;
+			}
+			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
+				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx offload has "
+				"wrong `%s` flag, correct is "
+				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
+				rte_get_tx_ol_flag_name(tunnel_type));
+				return -1;
+			}
+			goto check_len;
+		}
+	}
+
+check_len:
+	if (check_mbuf_len(&info, m) != 0)
+		return -1;
+
+	return check_ether_type(&info, m);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3017,6 +3654,14 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 				return i;
 			}
 		}
+
+#ifdef RTE_ETHDEV_DEBUG_TX
+		ret = iavf_check_mbuf(m);
+		if (ret != 0) {
+			rte_errno = EINVAL;
+			return i;
+		}
+#endif
 	}
 
 	return i;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH v3] net/iavf: add debug checks for mbuf
  2023-02-13  8:31   ` [PATCH v3] net/iavf: add debug checks " Mingjin Ye
@ 2023-02-19  8:30     ` Zhang, Qi Z
  2023-02-19 14:30       ` Thomas Monjalon
  0 siblings, 1 reply; 6+ messages in thread
From: Zhang, Qi Z @ 2023-02-19  8:30 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, stable, Zhou, YidingX, Ye, MingjinX, Wu, Jingjing,
	Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Monday, February 13, 2023 4:32 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; stable@dpdk.org; Zhou, YidingX
> <yidingx.zhou@intel.com>; Ye, MingjinX <mingjinx.ye@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v3] net/iavf: add debug checks for mbuf
> 
> The scalar Tx path may send a malformed mbuf, causing the kernel driver
> to fire the MDD event.
> 
> This patch adds mbuf detection in tx_prepare and enables it by defining
> the `RTE_ETHDEV_DEBUG_TX` macro to fix this issue. If validation fails,

This is a debug utility, not a fix; enabling this check will significantly degrade performance.
Removed "to fix this issue".

> rte_errno is set to EINVAL and the number of successfully prepared
> packets is returned.
> 
> Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
> Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
> Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")

Removed all the Fixes: lines.

> Cc: stable@dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel after applying the above changes.

Thanks
Qi
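
As a side note, the check is gated at compile time, so a normal build
pays nothing for it; the cost only appears when `RTE_ETHDEV_DEBUG_TX`
is defined at build time (typically via the build system's C flags). A
trivial, self-contained sketch of that gating pattern (the probe
function below is illustrative, not part of the patch):

	#include <stdio.h>

	/* Returns 1 only when the Tx debug checks are compiled in. */
	static int
	tx_debug_checks_enabled(void)
	{
	#ifdef RTE_ETHDEV_DEBUG_TX
		return 1;
	#else
		return 0;
	#endif
	}

	int
	main(void)
	{
		printf("Tx debug checks: %s\n",
		       tx_debug_checks_enabled() ? "enabled" : "disabled");
		return 0;
	}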


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3] net/iavf: add debug checks for mbuf
  2023-02-19  8:30     ` Zhang, Qi Z
@ 2023-02-19 14:30       ` Thomas Monjalon
  0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2023-02-19 14:30 UTC (permalink / raw)
  To: Ye, MingjinX, Zhang, Qi Z
  Cc: dev, Yang, Qiming, stable, Zhou, YidingX, Ye, MingjinX, Wu,
	Jingjing, Xing, Beilei

19/02/2023 09:30, Zhang, Qi Z:
> From: Mingjin Ye <mingjinx.ye@intel.com>
> > 
> > The scalar Tx path may send a malformed mbuf, causing the kernel driver
> > to fire the MDD event.
> > 
> > This patch adds mbuf detection in tx_prepare and enables it by defining
> > the `RTE_ETHDEV_DEBUG_TX` macro to fix this issue. If validation fails,
> 
> This is a debug utility, not a fix; enabling this check will significantly degrade performance.
> Removed "to fix this issue".
> 
> > rte_errno is set to EINVAL and the number of successfully prepared
> > packets is returned.
> > 
> > Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue")
> > Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
> > Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth")
> 
> Removed all the Fixes: lines.
> 
> > Cc: stable@dpdk.org

I think the "stable line" should be removed as well.
It is a new debug feature, we should not try to backport it.




^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2023-02-19 14:30 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-01-31 10:06 [PATCH] net/iavf: add check for mbuf Mingjin Ye
2023-02-02 10:03 ` [PATCH v2] " Mingjin Ye
2023-02-07  8:57   ` Ye, MingjinX
2023-02-13  8:31   ` [PATCH v3] net/iavf: add debug checks " Mingjin Ye
2023-02-19  8:30     ` Zhang, Qi Z
2023-02-19 14:30       ` Thomas Monjalon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).