From: Xiao Wang <xiao.w.wang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com, david.marchand@redhat.com
Cc: cheng1.jiang@intel.com, dev@dpdk.org, Xiao Wang <xiao.w.wang@intel.com>, stable@dpdk.org
Date: Mon, 21 Jun 2021 16:21:04 +0800
Message-Id: <20210621082104.76733-1-xiao.w.wang@intel.com>
In-Reply-To: <20210317063109.135662-1-xiao.w.wang@intel.com>
References: <20210317063109.135662-1-xiao.w.wang@intel.com>
Subject: [dpdk-stable] [PATCH v5] vhost: check header for legacy dequeue offload

When parsing the virtio net header and packet header for dequeue
offload, we need to perform sanity checks on the packet header to
ensure:
- No out-of-bounds memory access.
- The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v5:
- Redefine the function parse_ethernet() as parse_headers(). (David)
- Use mbuf helpers, e.g. rte_pktmbuf_data_len() and
  rte_pktmbuf_mtod_offset(). (David)
- Reset mbuf l2_len, l3_len and ol_flags when detecting anything
  invalid. (David)
- Improve some check conditions. (David)
- Move the data_len check for the L4 header into parse_headers(), to
  avoid duplicated checks in the CSUM and GSO paths.
- Use uint8_t instead of uint16_t for the l4_proto variable.
- Detect more invalid corner cases.

v4:
- Rebase on head of main branch.
- Allow empty L4 payload in GSO.

v3:
- Check data_len before calling rte_pktmbuf_mtod. (David)

v2:
- Allow empty L4 payload for cksum offload. (Konstantin)
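[Note, not part of the patch] For reviewers less familiar with the mbuf
API, the pattern the patch applies is: validate each header length
against the contiguous data length of the first mbuf segment before
dereferencing that header. Below is a minimal standalone sketch of the
IPv4 branch, assuming the DPDK headers are available;
parse_ipv4_sketch() is a hypothetical name used only for illustration:

#include <stdint.h>
#include <errno.h>

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

static int
parse_ipv4_sketch(struct rte_mbuf *m, uint8_t *l4_proto)
{
	uint16_t data_len = rte_pktmbuf_data_len(m);
	struct rte_ipv4_hdr *ipv4_hdr;

	/* The Ethernet header must fit before it is dereferenced. */
	if (data_len < sizeof(struct rte_ether_hdr))
		return -EINVAL;
	m->l2_len = sizeof(struct rte_ether_hdr);

	/* The fixed part of the IPv4 header must fit as well... */
	if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
		return -EINVAL;
	ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
			m->l2_len);

	/* ...and so must the full header, whose actual length is only
	 * known after reading the IHL field. */
	m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
	if (data_len < m->l2_len + m->l3_len)
		return -EINVAL;

	*l4_proto = ipv4_hdr->next_proto_id;
	return 0;
}

The same two-step check (fixed-size part first, then the variable-length
part once its length field can be read) is what parse_headers() below
does for VLAN, IPv4/IPv6 and the L4 headers.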
---
 lib/vhost/virtio_net.c | 117 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 89 insertions(+), 28 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..fb21b56558 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2259,14 +2259,17 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
-	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = rte_pktmbuf_data_len(m);
+
+	if (data_len < sizeof(struct rte_ether_hdr))
+		return -EINVAL;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
@@ -2274,6 +2277,10 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len < sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_vlan_hdr))
+			goto error;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
@@ -2281,70 +2288,118 @@
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
 	}
 
-	l3_hdr = (char *)eth_hdr + m->l2_len;
-
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
-		ipv4_hdr = l3_hdr;
-		*l4_proto = ipv4_hdr->next_proto_id;
+		if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
+			goto error;
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+				m->l2_len);
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		if (data_len < m->l2_len + m->l3_len)
+			goto error;
 		m->ol_flags |= PKT_TX_IPV4;
+		*l4_proto = ipv4_hdr->next_proto_id;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
-		ipv6_hdr = l3_hdr;
-		*l4_proto = ipv6_hdr->proto;
+		if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
+			goto error;
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+				m->l2_len);
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		*l4_proto = ipv6_hdr->proto;
 		break;
 	default:
-		m->l3_len = 0;
-		*l4_proto = 0;
-		*l4_hdr = NULL;
+		/* a valid L3 header is needed for further L4 parsing */
+		goto error;
+	}
+
+	/* both CSUM and GSO need a valid L4 header */
+	switch (*l4_proto) {
+	case IPPROTO_TCP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_tcp_hdr))
+			goto error;
+		break;
+	case IPPROTO_UDP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_udp_hdr))
+			goto error;
 		break;
+	case IPPROTO_SCTP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_sctp_hdr))
+			goto error;
+		break;
+	default:
+		goto error;
 	}
+
+	return 0;
+
+error:
+	m->l2_len = 0;
+	m->l3_len = 0;
+	m->ol_flags = 0;
+	return -EINVAL;
 }
 
 static __rte_always_inline void
 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 {
-	uint16_t l4_proto = 0;
-	void *l4_hdr = NULL;
+	uint8_t l4_proto = 0;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t tcp_len;
+	uint16_t data_len = rte_pktmbuf_data_len(m);
+
+	if (parse_headers(m, &l4_proto) < 0)
+		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
-					m->ol_flags |= PKT_TX_TCP_CKSUM;
+				if (l4_proto != IPPROTO_TCP)
+					goto error;
+				m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
-					m->ol_flags |= PKT_TX_UDP_CKSUM;
+				if (l4_proto != IPPROTO_UDP)
+					goto error;
+				m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
-					m->ol_flags |= PKT_TX_SCTP_CKSUM;
+				if (l4_proto != IPPROTO_SCTP)
+					goto error;
+				m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
-				break;
+				goto error;
 			}
+		} else {
+			goto error;
 		}
 	}
 
-	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
-			tcp_hdr = l4_hdr;
+			if (l4_proto != IPPROTO_TCP)
+				goto error;
+			tcp_hdr = rte_pktmbuf_mtod_offset(m,
+					struct rte_tcp_hdr *,
+					m->l2_len + m->l3_len);
+			tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			if (data_len < m->l2_len + m->l3_len + tcp_len)
+				goto error;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			m->l4_len = tcp_len;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP)
+				goto error;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
@@ -2352,9 +2407,15 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		default:
 			VHOST_LOG_DATA(WARNING,
 				"unsupported gso type %u.\n", hdr->gso_type);
-			break;
+			goto error;
 		}
 	}
+	return;
+
+error:
+	m->l2_len = 0;
+	m->l3_len = 0;
+	m->ol_flags = 0;
 }
 
 static __rte_always_inline void
-- 
2.15.1
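[Note, not part of the patch] On the TCP header length computation in
the GSO path: tcp_hdr->data_off comes from the packet, i.e. from the
guest, so the derived length must itself be checked against data_len
before it is trusted, which is what the new "data_len < m->l2_len +
m->l3_len + tcp_len" test does. The arithmetic: the upper four bits of
data_off hold the TCP header length in 32-bit words, so
(data_off & 0xf0) >> 2 converts it to bytes, since
(words << 4) >> 2 == words * 4. A self-contained check of that
arithmetic, using plain C only:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Minimal TCP header: high nibble = 5 words = 20 bytes. */
	uint8_t data_off = 0x50;
	assert(((data_off & 0xf0) >> 2) == 20);

	/* Maximal TCP header: 15 words = 60 bytes (40 bytes of options). */
	data_off = 0xf0;
	assert(((data_off & 0xf0) >> 2) == 60);

	return 0;
}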