From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id B0B5641B8D; Tue, 31 Jan 2023 11:10:11 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4990040DFB; Tue, 31 Jan 2023 11:10:11 +0100 (CET) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id 03EBC4067B; Tue, 31 Jan 2023 11:10:08 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1675159809; x=1706695809; h=from:to:cc:subject:date:message-id:mime-version: content-transfer-encoding; bh=1lhrMFG8adQRA0wxWsZRUaSgj2WTF8ChrYVEPNEcw0w=; b=VcE8rf73U/qwOYfvvOnu+Qa6FLhIs2V01FGSe7T+XTdJeE9uyIRfGMGZ a/KAod+GCdA412E8k/+AyLHfRl194tZT/+tWJgrrJcvPYrwvbZFCzMD7R hEjY9cYtZVhmQt1r4KD1ceGw7B4imoazUOdUZsuyyOmojK7mz5cyBxsc7 CvjRIAERPg7XNIYiRtrzOfaxC+vaZjhHZJsbnCa05zpChYaDSe2keerT4 sDs3gMkjIN7ztYvGzyGRkJjZISBKIj1VEPd7iQ1xNI3SOQedqHJJbaJUI 7yYXrT5B3+7YWB0VHFV4NsqG+LRoo+NWXCeN++a6T0CwhDsZNpTMbLCwh g==; X-IronPort-AV: E=McAfee;i="6500,9779,10606"; a="390165073" X-IronPort-AV: E=Sophos;i="5.97,261,1669104000"; d="scan'208";a="390165073" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 31 Jan 2023 02:10:07 -0800 X-IronPort-AV: E=McAfee;i="6500,9779,10606"; a="807047761" X-IronPort-AV: E=Sophos;i="5.97,261,1669104000"; d="scan'208";a="807047761" Received: from unknown (HELO localhost.localdomain) ([10.239.252.253]) by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 31 Jan 2023 02:10:04 -0800 From: Mingjin Ye To: dev@dpdk.org Cc: qiming.yang@intel.com, yidingx.zhou@intel.com, Mingjin Ye , stable@dpdk.org, Jingjing Wu , Beilei Xing Subject: [PATCH] net/iavf: add check for mbuf Date: Tue, 31 Jan 2023 10:06:53 +0000 Message-Id: 
<20230131100653.336119-1-mingjinx.ye@intel.com> X-Mailer: git-send-email 2.25.1 MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The scalar Tx path would send wrong mbuf that causes the kernel driver to fire the MDD event. This patch adds mbuf detection in tx_prepare to fix this issue, rte_errno will be set to EINVAL and returned if the verification fails. Fixes: 3fd32df381f8 ("net/iavf: check Tx packet with correct UP and queue") Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction") Fixes: f28fbd1e6b50 ("net/iavf: check max SIMD bitwidth") Cc: stable@dpdk.org Signed-off-by: Mingjin Ye --- drivers/net/iavf/iavf_rxtx.c | 599 +++++++++++++++++++++++++++++++++++ 1 file changed, 599 insertions(+) diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index 8d49967538..63012fdc28 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -24,12 +24,55 @@ #include #include #include +#include +#include +#include #include "iavf.h" #include "iavf_rxtx.h" #include "iavf_ipsec_crypto.h" #include "rte_pmd_iavf.h" +#define GRE_CHECKSUM_PRESENT 0x8000 +#define GRE_KEY_PRESENT 0x2000 +#define GRE_SEQUENCE_PRESENT 0x1000 +#define GRE_EXT_LEN 4 +#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\ + GRE_SEQUENCE_PRESENT) + +#ifndef IPPROTO_IPIP +#define IPPROTO_IPIP 4 +#endif +#ifndef IPPROTO_GRE +#define IPPROTO_GRE 47 +#endif + +static uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT; +static uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT; + +struct simple_gre_hdr { + uint16_t flags; + uint16_t proto; +} __rte_packed; + +/* structure that caches offload info for the current packet */ +struct offload_info { + uint16_t ethertype; + uint8_t gso_enable; + uint16_t l2_len; + uint16_t 
l3_len; + uint16_t l4_len; + uint8_t l4_proto; + uint8_t is_tunnel; + uint16_t outer_ethertype; + uint16_t outer_l2_len; + uint16_t outer_l3_len; + uint8_t outer_l4_proto; + uint16_t tso_segsz; + uint16_t tunnel_tso_segsz; + uint32_t pkt_len; +}; + /* Offset of mbuf dynamic field for protocol extraction's metadata */ int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1; @@ -2949,6 +2992,555 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m) } } +/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */ +static inline void +parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct offload_info *info) +{ + struct rte_tcp_hdr *tcp_hdr; + + info->l3_len = rte_ipv4_hdr_len(ipv4_hdr); + info->l4_proto = ipv4_hdr->next_proto_id; + + /* only fill l4_len for TCP, it's useful for TSO */ + if (info->l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *) + ((char *)ipv4_hdr + info->l3_len); + info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; + } else if (info->l4_proto == IPPROTO_UDP) { + info->l4_len = sizeof(struct rte_udp_hdr); + } else { + info->l4_len = 0; + } +} + +/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */ +static inline void +parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct offload_info *info) +{ + struct rte_tcp_hdr *tcp_hdr; + + info->l3_len = sizeof(struct rte_ipv6_hdr); + info->l4_proto = ipv6_hdr->proto; + + /* only fill l4_len for TCP, it's useful for TSO */ + if (info->l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *) + ((char *)ipv6_hdr + info->l3_len); + info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; + } else if (info->l4_proto == IPPROTO_UDP) { + info->l4_len = sizeof(struct rte_udp_hdr); + } else { + info->l4_len = 0; + } +} + +/* + * Parse an ethernet header to fill the ethertype, l2_len, l3_len and + * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN + * headers. The l4_len argument is only set in case of TCP (useful for TSO). 
+ */ +static inline void +parse_ethernet(struct rte_ether_hdr *eth_hdr, struct offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_vlan_hdr *vlan_hdr; + + info->l2_len = sizeof(struct rte_ether_hdr); + info->ethertype = eth_hdr->ether_type; + + while (info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) || + info->ethertype == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) { + vlan_hdr = (struct rte_vlan_hdr *) + ((char *)eth_hdr + info->l2_len); + info->l2_len += sizeof(struct rte_vlan_hdr); + info->ethertype = vlan_hdr->eth_proto; + } + + switch (info->ethertype) { + case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV4): + ipv4_hdr = (struct rte_ipv4_hdr *) + ((char *)eth_hdr + info->l2_len); + parse_ipv4(ipv4_hdr, info); + break; + case RTE_STATIC_BSWAP16(RTE_ETHER_TYPE_IPV6): + ipv6_hdr = (struct rte_ipv6_hdr *) + ((char *)eth_hdr + info->l2_len); + parse_ipv6(ipv6_hdr, info); + break; + default: + info->l4_len = 0; + info->l3_len = 0; + info->l4_proto = 0; + break; + } +} + +/* Fill in outer layers length */ +static inline void +update_tunnel_outer(struct offload_info *info) +{ + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; +} + +/* + * Parse a GTP protocol header. + * No optional fields and next extension header type. + */ +static inline void +parse_gtp(struct rte_udp_hdr *udp_hdr, + struct offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_gtp_hdr *gtp_hdr; + uint8_t gtp_len = sizeof(*gtp_hdr); + uint8_t ip_ver; + + /* Check udp destination port. 
*/ + if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) && + udp_hdr->src_port != rte_cpu_to_be_16(RTE_GTPC_UDP_PORT) && + udp_hdr->dst_port != rte_cpu_to_be_16(RTE_GTPU_UDP_PORT)) + return; + + update_tunnel_outer(info); + info->l2_len = 0; + + gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr)); + + /* + * Check message type. If message type is 0xff, it is + * a GTP data packet. If not, it is a GTP control packet + */ + if (gtp_hdr->msg_type == 0xff) { + ip_ver = *(uint8_t *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr) + + sizeof(struct rte_gtp_hdr)); + ip_ver = (ip_ver) & 0xf0; + + if (ip_ver == RTE_GTP_TYPE_IPV4) { + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr + + gtp_len); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + parse_ipv4(ipv4_hdr, info); + } else if (ip_ver == RTE_GTP_TYPE_IPV6) { + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr + + gtp_len); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + } + } else { + info->ethertype = 0; + info->l4_len = 0; + info->l3_len = 0; + info->l4_proto = 0; + } + + info->l2_len += RTE_ETHER_GTP_HLEN; +} + +/* Parse a vxlan header */ +static inline void +parse_vxlan(struct rte_udp_hdr *udp_hdr, + struct offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + + /* check udp destination port, RTE_VXLAN_DEFAULT_PORT (4789) is the + * default vxlan port (rfc7348) or that the rx offload flag is set + * (i40e only currently) + */ + if (udp_hdr->dst_port != rte_cpu_to_be_16(RTE_VXLAN_DEFAULT_PORT)) + return; + + update_tunnel_outer(info); + + eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr) + + sizeof(struct rte_vxlan_hdr)); + + parse_ethernet(eth_hdr, info); + info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */ +} + +/* Parse a vxlan-gpe header */ +static inline void +parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr, + struct offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + 
struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr; + uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr); + + /* Check udp destination port. */ + if (udp_hdr->dst_port != rte_cpu_to_be_16(vxlan_gpe_udp_port)) + return; + + vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr)); + + if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto == + RTE_VXLAN_GPE_TYPE_IPV4) { + update_tunnel_outer(info); + + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ipv4(ipv4_hdr, info); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) { + update_tunnel_outer(info); + + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) { + update_tunnel_outer(info); + + eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ethernet(eth_hdr, info); + } else { + return; + } + + + info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN; +} + +/* Parse a geneve header */ +static inline void +parse_geneve(struct rte_udp_hdr *udp_hdr, + struct offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_geneve_hdr *geneve_hdr; + uint16_t geneve_len; + + /* Check udp destination port. 
*/ + if (udp_hdr->dst_port != rte_cpu_to_be_16(geneve_udp_port)) + return; + + geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr)); + geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4; + if (!geneve_hdr->proto || geneve_hdr->proto == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + update_tunnel_outer(info); + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr + + geneve_len); + parse_ipv4(ipv4_hdr, info); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + info->l2_len = 0; + } else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + update_tunnel_outer(info); + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr + + geneve_len); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (geneve_hdr->proto == rte_cpu_to_be_16(RTE_GENEVE_TYPE_ETH)) { + update_tunnel_outer(info); + eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr + + geneve_len); + parse_ethernet(eth_hdr, info); + } else { + return; + } + + info->l2_len += + (sizeof(struct rte_udp_hdr) + sizeof(struct rte_geneve_hdr) + + ((struct rte_geneve_hdr *)geneve_hdr)->opt_len * 4); +} + +/* Parse a gre header */ +static inline void +parse_gre(struct simple_gre_hdr *gre_hdr, struct offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + uint8_t gre_len = 0; + + gre_len += sizeof(struct simple_gre_hdr); + + if (gre_hdr->flags & rte_cpu_to_be_16(GRE_KEY_PRESENT)) + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & rte_cpu_to_be_16(GRE_SEQUENCE_PRESENT)) + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & rte_cpu_to_be_16(GRE_CHECKSUM_PRESENT)) + gre_len += GRE_EXT_LEN; + + if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + update_tunnel_outer(info); + + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len); + + parse_ipv4(ipv4_hdr, info); + info->ethertype = 
rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + info->l2_len = 0; + + } else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + update_tunnel_outer(info); + + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len); + + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (gre_hdr->proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) { + update_tunnel_outer(info); + + eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len); + + parse_ethernet(eth_hdr, info); + } else { + return; + } + + info->l2_len += gre_len; +} + +/* Parse an encapsulated ip or ipv6 header */ +static inline void +parse_encap_ip(void *encap_ip, struct offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr = encap_ip; + struct rte_ipv6_hdr *ipv6_hdr = encap_ip; + uint8_t ip_version; + + ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4; + + if (ip_version != 4 && ip_version != 6) + return; + + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + + if (ip_version == 4) { + parse_ipv4(ipv4_hdr, info); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + } else { + parse_ipv6(ipv6_hdr, info); + info->ethertype = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + } + info->l2_len = 0; +} + +static inline int +check_mbuf_len(struct offload_info *info, struct rte_mbuf *m) +{ + if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { + if (info->outer_l2_len != m->outer_l2_len) { + PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original " + "length: %hu, calculated length: %u", m->outer_l2_len, + info->outer_l2_len); + return -1; + } + if (info->outer_l3_len != m->outer_l3_len) { + PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original " + "length: %hu,calculated length: %u", m->outer_l3_len, + info->outer_l3_len); + return -1; + } + } + + if (info->l2_len != m->l2_len) { + PMD_TX_LOG(ERR, "l2_len error in mbuf. 
Original " + "length: %hu, calculated length: %u", m->l2_len, + info->l2_len); + return -1; + } + if (info->l3_len != m->l3_len) { + PMD_TX_LOG(ERR, "l3_len error in mbuf. Original " + "length: %hu, calculated length: %u", m->l3_len, + info->l3_len); + return -1; + } + if (info->l4_len != m->l4_len) { + PMD_TX_LOG(ERR, "l4_len error in mbuf. Original " + "length: %hu, calculated length: %u", m->l4_len, + info->l4_len); + return -1; + } + + return 0; +} + +static inline int +check_ether_type(struct offload_info *info, struct rte_mbuf *m) +{ + int ret = 0; + + if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { + if (info->outer_ethertype == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) { + PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, " + "tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag."); + ret = -1; + } + if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) { + PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx " + "offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag"); + ret = -1; + } + } else if (info->outer_ethertype == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) { + PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, " + "tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag."); + ret = -1; + } + if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { + PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx " + "offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag"); + ret = -1; + } + } + } + + if (info->ethertype == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) { + PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload " + "missing `RTE_MBUF_F_TX_IPV4` flag."); + ret = -1; + } + if (m->ol_flags & RTE_MBUF_F_TX_IPV6) { + PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx " + "offload contains wrong `RTE_MBUF_F_TX_IPV6` flag"); + ret = -1; + } + } else if (info->ethertype == + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) { + 
PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload " + "missing `RTE_MBUF_F_TX_IPV6` flag."); + ret = -1; + } + if (m->ol_flags & RTE_MBUF_F_TX_IPV4) { + PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload " + "contains wrong `RTE_MBUF_F_TX_IPV4` flag"); + ret = -1; + } + } + + return ret; +} + +/* Check whether the parameters of mubf are correct. */ +__rte_unused static inline int +iavf_check_mbuf(struct rte_mbuf *m) +{ + struct rte_ether_hdr *eth_hdr; + void *l3_hdr = NULL; /* can be IPv4 or IPv6 */ + struct offload_info info = {0}; + uint64_t ol_flags = m->ol_flags; + + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + parse_ethernet(eth_hdr, &info); + l3_hdr = (char *)eth_hdr + info.l2_len; + if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { + if (info.l4_proto == IPPROTO_UDP) { + struct rte_udp_hdr *udp_hdr; + + udp_hdr = (struct rte_udp_hdr *) + ((char *)l3_hdr + info.l3_len); + parse_gtp(udp_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP)) { + PMD_TX_LOG(ERR, "gtp tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + parse_vxlan_gpe(udp_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) { + PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + parse_vxlan(udp_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)) { + PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + parse_geneve(udp_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE)) { + PMD_TX_LOG(ERR, "geneve tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + /* Always keep last. */ + if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type) + != 0)) { + PMD_TX_LOG(ERR, "Unknown tunnel packet. 
UDP dst port: %hu", + udp_hdr->dst_port); + return -1; + } + } else if (info.l4_proto == IPPROTO_GRE) { + struct simple_gre_hdr *gre_hdr; + + gre_hdr = (struct simple_gre_hdr *)((char *)l3_hdr + + info.l3_len); + parse_gre(gre_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE)) { + PMD_TX_LOG(ERR, "gre tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + } else if (info.l4_proto == IPPROTO_IPIP) { + void *encap_ip_hdr; + + encap_ip_hdr = (char *)l3_hdr + info.l3_len; + parse_encap_ip(encap_ip_hdr, &info); + if (info.is_tunnel) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP)) { + PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx flag"); + return -1; + } + goto check_len; + } + } + } + +check_len: + if (check_mbuf_len(&info, m) != 0) + return -1; + + return check_ether_type(&info, m); +} + /* TX prep functions */ uint16_t iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, @@ -3017,6 +3609,13 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } } +#ifdef RTE_ETHDEV_DEBUG_TX + ret = iavf_check_mbuf(m); + if (ret != 0) { + rte_errno = EINVAL; + return i; + } +#endif } return i; -- 2.25.1