DPDK patches and discussions
* [dpdk-dev] [PATCH] vhost: add header check in dequeue offload
@ 2021-03-11  6:38 Xiao Wang
  2021-03-11 10:38 ` Ananyev, Konstantin
                   ` (2 more replies)
  0 siblings, 3 replies; 20+ messages in thread
From: Xiao Wang @ 2021-03-11  6:38 UTC (permalink / raw)
  To: chenbo.xia, maxime.coquelin; +Cc: yong.liu, dev, Xiao Wang, stable

When parsing the virtio net header and packet header for dequeue offload,
we need to perform sanity check on the packet header to ensure:
  - No out-of-boundary memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
 lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 6 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c6..0fba0053a3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
+		uint16_t *len)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = m->data_len;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
+	if (data_len <= sizeof(struct rte_ether_hdr))
+		return -EINVAL;
+
 	m->l2_len = sizeof(struct rte_ether_hdr);
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+	data_len -= sizeof(struct rte_ether_hdr);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len <= sizeof(struct rte_vlan_hdr))
+			return -EINVAL;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		m->l2_len += sizeof(struct rte_vlan_hdr);
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		data_len -= sizeof(struct rte_vlan_hdr);
 	}
 
 	l3_hdr = (char *)eth_hdr + m->l2_len;
 
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
+		if (data_len <= sizeof(struct rte_ipv4_hdr))
+			return -EINVAL;
 		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+		if (data_len <= m->l3_len) {
+			m->l3_len = 0;
+			return -EINVAL;
+		}
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
+		data_len -= m->l3_len;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
+		if (data_len <= sizeof(struct rte_ipv6_hdr))
+			return -EINVAL;
 		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		data_len -= m->l3_len;
 		break;
 	default:
 		m->l3_len = 0;
@@ -1866,6 +1886,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		*l4_hdr = NULL;
 		break;
 	}
+
+	*len = data_len;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -1874,24 +1897,30 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	uint16_t l4_proto = 0;
 	void *l4_hdr = NULL;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t len = 0;
 
 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
 		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
+		return;
+
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
+				if (l4_proto == IPPROTO_TCP &&
+					len > sizeof(struct rte_tcp_hdr))
 					m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
+				if (l4_proto == IPPROTO_UDP &&
+					len > sizeof(struct rte_udp_hdr))
 					m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
+				if (l4_proto == IPPROTO_SCTP &&
+					len > sizeof(struct rte_sctp_hdr))
 					m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
@@ -1904,12 +1933,20 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
+			if (l4_proto != IPPROTO_TCP ||
+					len <= sizeof(struct rte_tcp_hdr))
+				break;
 			tcp_hdr = l4_hdr;
+			if (len <= (tcp_hdr->data_off & 0xf0) >> 2)
+				break;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP ||
+					len <= sizeof(struct rte_udp_hdr))
+				break;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
-- 
2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH] vhost: add header check in dequeue offload
  2021-03-11  6:38 [dpdk-dev] [PATCH] vhost: add header check in dequeue offload Xiao Wang
@ 2021-03-11 10:38 ` Ananyev, Konstantin
  2021-03-12 15:19   ` Wang, Xiao W
  2021-03-15 15:32 ` [dpdk-dev] [PATCH v2] " Xiao Wang
  2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
  2 siblings, 1 reply; 20+ messages in thread
From: Ananyev, Konstantin @ 2021-03-11 10:38 UTC (permalink / raw)
  To: Wang, Xiao W, Xia, Chenbo, maxime.coquelin
  Cc: Liu, Yong, dev, Wang, Xiao W, stable



> 
> When parsing the virtio net header and packet header for dequeue offload,
> we need to perform sanity check on the packet header to ensure:
>   - No out-of-boundary memory access.
>   - The packet header and virtio_net header are valid and aligned.
> 
> Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> ---
>  lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
>  1 file changed, 43 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 583bf379c6..0fba0053a3 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
>  	return false;
>  }
> 
> -static void
> -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> +static int
> +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> +		uint16_t *len)
>  {
>  	struct rte_ipv4_hdr *ipv4_hdr;
>  	struct rte_ipv6_hdr *ipv6_hdr;
>  	void *l3_hdr = NULL;
>  	struct rte_ether_hdr *eth_hdr;
>  	uint16_t ethertype;
> +	uint16_t data_len = m->data_len;
> 
>  	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> 
> +	if (data_len <= sizeof(struct rte_ether_hdr))
> +		return -EINVAL;
> +
>  	m->l2_len = sizeof(struct rte_ether_hdr);
>  	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
> +	data_len -= sizeof(struct rte_ether_hdr);
> 
>  	if (ethertype == RTE_ETHER_TYPE_VLAN) {
> +		if (data_len <= sizeof(struct rte_vlan_hdr))
> +			return -EINVAL;
> +
>  		struct rte_vlan_hdr *vlan_hdr =
>  			(struct rte_vlan_hdr *)(eth_hdr + 1);
> 
>  		m->l2_len += sizeof(struct rte_vlan_hdr);
>  		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
> +		data_len -= sizeof(struct rte_vlan_hdr);
>  	}
> 
>  	l3_hdr = (char *)eth_hdr + m->l2_len;
> 
>  	switch (ethertype) {
>  	case RTE_ETHER_TYPE_IPV4:
> +		if (data_len <= sizeof(struct rte_ipv4_hdr))
> +			return -EINVAL;
>  		ipv4_hdr = l3_hdr;
>  		*l4_proto = ipv4_hdr->next_proto_id;
>  		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> +		if (data_len <= m->l3_len) {
> +			m->l3_len = 0;
> +			return -EINVAL;
> +		}
>  		*l4_hdr = (char *)l3_hdr + m->l3_len;
>  		m->ol_flags |= PKT_TX_IPV4;
> +		data_len -= m->l3_len;
>  		break;
>  	case RTE_ETHER_TYPE_IPV6:
> +		if (data_len <= sizeof(struct rte_ipv6_hdr))
> +			return -EINVAL;
>  		ipv6_hdr = l3_hdr;
>  		*l4_proto = ipv6_hdr->proto;
>  		m->l3_len = sizeof(struct rte_ipv6_hdr);
>  		*l4_hdr = (char *)l3_hdr + m->l3_len;
>  		m->ol_flags |= PKT_TX_IPV6;
> +		data_len -= m->l3_len;
>  		break;
>  	default:
>  		m->l3_len = 0;
> @@ -1866,6 +1886,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
>  		*l4_hdr = NULL;
>  		break;
>  	}
> +
> +	*len = data_len;
> +	return 0;
>  }
> 
>  static __rte_always_inline void
> @@ -1874,24 +1897,30 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>  	uint16_t l4_proto = 0;
>  	void *l4_hdr = NULL;
>  	struct rte_tcp_hdr *tcp_hdr = NULL;
> +	uint16_t len = 0;
> 
>  	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
>  		return;
> 
> -	parse_ethernet(m, &l4_proto, &l4_hdr);
> +	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
> +		return;
> +
>  	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
>  		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
>  			switch (hdr->csum_offset) {
>  			case (offsetof(struct rte_tcp_hdr, cksum)):
> -				if (l4_proto == IPPROTO_TCP)
> +				if (l4_proto == IPPROTO_TCP &&
> +					len > sizeof(struct rte_tcp_hdr))

Shouldn't it be '>=' here?

>  					m->ol_flags |= PKT_TX_TCP_CKSUM;
>  				break;
>  			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
> -				if (l4_proto == IPPROTO_UDP)
> +				if (l4_proto == IPPROTO_UDP &&
> +					len > sizeof(struct rte_udp_hdr))
>  					m->ol_flags |= PKT_TX_UDP_CKSUM;
>  				break;
>  			case (offsetof(struct rte_sctp_hdr, cksum)):
> -				if (l4_proto == IPPROTO_SCTP)
> +				if (l4_proto == IPPROTO_SCTP &&
> +					len > sizeof(struct rte_sctp_hdr))
>  					m->ol_flags |= PKT_TX_SCTP_CKSUM;
>  				break;
>  			default:
> @@ -1904,12 +1933,20 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>  		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
>  		case VIRTIO_NET_HDR_GSO_TCPV4:
>  		case VIRTIO_NET_HDR_GSO_TCPV6:
> +			if (l4_proto != IPPROTO_TCP ||
> +					len <= sizeof(struct rte_tcp_hdr))
> +				break;
>  			tcp_hdr = l4_hdr;
> +			if (len <= (tcp_hdr->data_off & 0xf0) >> 2)
> +				break;
>  			m->ol_flags |= PKT_TX_TCP_SEG;
>  			m->tso_segsz = hdr->gso_size;
>  			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
>  			break;
>  		case VIRTIO_NET_HDR_GSO_UDP:
> +			if (l4_proto != IPPROTO_UDP ||
> +					len <= sizeof(struct rte_udp_hdr))
> +				break;
>  			m->ol_flags |= PKT_TX_UDP_SEG;
>  			m->tso_segsz = hdr->gso_size;
>  			m->l4_len = sizeof(struct rte_udp_hdr);
> --
> 2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH] vhost: add header check in dequeue offload
  2021-03-11 10:38 ` Ananyev, Konstantin
@ 2021-03-12 15:19   ` Wang, Xiao W
  0 siblings, 0 replies; 20+ messages in thread
From: Wang, Xiao W @ 2021-03-12 15:19 UTC (permalink / raw)
  To: Ananyev, Konstantin, Xia, Chenbo, maxime.coquelin; +Cc: Liu, Yong, dev, stable

Hi Konstantin,

Comments inline.

BRs,
Xiao

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Thursday, March 11, 2021 6:38 PM
> To: Wang, Xiao W <xiao.w.wang@intel.com>; Xia, Chenbo
> <chenbo.xia@intel.com>; maxime.coquelin@redhat.com
> Cc: Liu, Yong <yong.liu@intel.com>; dev@dpdk.org; Wang, Xiao W
> <xiao.w.wang@intel.com>; stable@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH] vhost: add header check in dequeue
> offload
> 
> 
> 
> >
> > When parsing the virtio net header and packet header for dequeue
> offload,
> > we need to perform sanity check on the packet header to ensure:
> >   - No out-of-boundary memory access.
> >   - The packet header and virtio_net header are valid and aligned.
> >
> > Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > ---
> >  lib/librte_vhost/virtio_net.c | 49
> +++++++++++++++++++++++++++++++++++++------
> >  1 file changed, 43 insertions(+), 6 deletions(-)
> >
> > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> > index 583bf379c6..0fba0053a3 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct
> virtio_net *dev)
> >  	return false;
> >  }
> >
> > -static void
> > -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> > +static int
> > +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> > +		uint16_t *len)
> >  {
> >  	struct rte_ipv4_hdr *ipv4_hdr;
> >  	struct rte_ipv6_hdr *ipv6_hdr;
> >  	void *l3_hdr = NULL;
> >  	struct rte_ether_hdr *eth_hdr;
> >  	uint16_t ethertype;
> > +	uint16_t data_len = m->data_len;
> >
> >  	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> >
> > +	if (data_len <= sizeof(struct rte_ether_hdr))
> > +		return -EINVAL;
> > +
> >  	m->l2_len = sizeof(struct rte_ether_hdr);
> >  	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
> > +	data_len -= sizeof(struct rte_ether_hdr);
> >
> >  	if (ethertype == RTE_ETHER_TYPE_VLAN) {
> > +		if (data_len <= sizeof(struct rte_vlan_hdr))
> > +			return -EINVAL;
> > +
> >  		struct rte_vlan_hdr *vlan_hdr =
> >  			(struct rte_vlan_hdr *)(eth_hdr + 1);
> >
> >  		m->l2_len += sizeof(struct rte_vlan_hdr);
> >  		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
> > +		data_len -= sizeof(struct rte_vlan_hdr);
> >  	}
> >
> >  	l3_hdr = (char *)eth_hdr + m->l2_len;
> >
> >  	switch (ethertype) {
> >  	case RTE_ETHER_TYPE_IPV4:
> > +		if (data_len <= sizeof(struct rte_ipv4_hdr))
> > +			return -EINVAL;
> >  		ipv4_hdr = l3_hdr;
> >  		*l4_proto = ipv4_hdr->next_proto_id;
> >  		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> > +		if (data_len <= m->l3_len) {
> > +			m->l3_len = 0;
> > +			return -EINVAL;
> > +		}
> >  		*l4_hdr = (char *)l3_hdr + m->l3_len;
> >  		m->ol_flags |= PKT_TX_IPV4;
> > +		data_len -= m->l3_len;
> >  		break;
> >  	case RTE_ETHER_TYPE_IPV6:
> > +		if (data_len <= sizeof(struct rte_ipv6_hdr))
> > +			return -EINVAL;
> >  		ipv6_hdr = l3_hdr;
> >  		*l4_proto = ipv6_hdr->proto;
> >  		m->l3_len = sizeof(struct rte_ipv6_hdr);
> >  		*l4_hdr = (char *)l3_hdr + m->l3_len;
> >  		m->ol_flags |= PKT_TX_IPV6;
> > +		data_len -= m->l3_len;
> >  		break;
> >  	default:
> >  		m->l3_len = 0;
> > @@ -1866,6 +1886,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t
> *l4_proto, void **l4_hdr)
> >  		*l4_hdr = NULL;
> >  		break;
> >  	}
> > +
> > +	*len = data_len;
> > +	return 0;
> >  }
> >
> >  static __rte_always_inline void
> > @@ -1874,24 +1897,30 @@ vhost_dequeue_offload(struct virtio_net_hdr
> *hdr, struct rte_mbuf *m)
> >  	uint16_t l4_proto = 0;
> >  	void *l4_hdr = NULL;
> >  	struct rte_tcp_hdr *tcp_hdr = NULL;
> > +	uint16_t len = 0;
> >
> >  	if (hdr->flags == 0 && hdr->gso_type ==
> VIRTIO_NET_HDR_GSO_NONE)
> >  		return;
> >
> > -	parse_ethernet(m, &l4_proto, &l4_hdr);
> > +	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
> > +		return;
> > +
> >  	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
> >  		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
> >  			switch (hdr->csum_offset) {
> >  			case (offsetof(struct rte_tcp_hdr, cksum)):
> > -				if (l4_proto == IPPROTO_TCP)
> > +				if (l4_proto == IPPROTO_TCP &&
> > +					len > sizeof(struct rte_tcp_hdr))
> 
> Shouldn't it be '>=' here?

You are right, there are packets with no payload. I just did an experiment where I sent a TCP packet with only headers from the guest using scapy, and the vhost side got 54 bytes (14+20+20); there is no padding.
I will also fix this issue for UDP and SCTP.
Thanks for the comment.

-Xiao

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [dpdk-dev] [PATCH v2] vhost: add header check in dequeue offload
  2021-03-11  6:38 [dpdk-dev] [PATCH] vhost: add header check in dequeue offload Xiao Wang
  2021-03-11 10:38 ` Ananyev, Konstantin
@ 2021-03-15 15:32 ` Xiao Wang
  2021-03-15 16:17   ` [dpdk-dev] [dpdk-stable] " David Marchand
  2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
  2 siblings, 1 reply; 20+ messages in thread
From: Xiao Wang @ 2021-03-15 15:32 UTC (permalink / raw)
  To: chenbo.xia, maxime.coquelin
  Cc: yong.liu, dev, konstantin.ananyev, Xiao Wang, stable

When parsing the virtio net header and packet header for dequeue offload,
we need to perform sanity check on the packet header to ensure:
  - No out-of-boundary memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v2:
Allow empty L4 payload for cksum offload.
---
 lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 6 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c6..53a8ff2898 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
+		uint16_t *len)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = m->data_len;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
+	if (data_len <= sizeof(struct rte_ether_hdr))
+		return -EINVAL;
+
 	m->l2_len = sizeof(struct rte_ether_hdr);
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+	data_len -= sizeof(struct rte_ether_hdr);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len <= sizeof(struct rte_vlan_hdr))
+			return -EINVAL;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		m->l2_len += sizeof(struct rte_vlan_hdr);
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		data_len -= sizeof(struct rte_vlan_hdr);
 	}
 
 	l3_hdr = (char *)eth_hdr + m->l2_len;
 
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
+		if (data_len <= sizeof(struct rte_ipv4_hdr))
+			return -EINVAL;
 		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+		if (data_len <= m->l3_len) {
+			m->l3_len = 0;
+			return -EINVAL;
+		}
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
+		data_len -= m->l3_len;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
+		if (data_len <= sizeof(struct rte_ipv6_hdr))
+			return -EINVAL;
 		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		data_len -= m->l3_len;
 		break;
 	default:
 		m->l3_len = 0;
@@ -1866,6 +1886,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		*l4_hdr = NULL;
 		break;
 	}
+
+	*len = data_len;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -1874,24 +1897,30 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	uint16_t l4_proto = 0;
 	void *l4_hdr = NULL;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t len = 0;
 
 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
 		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
+		return;
+
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
+				if (l4_proto == IPPROTO_TCP &&
+					len >= sizeof(struct rte_tcp_hdr))
 					m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
+				if (l4_proto == IPPROTO_UDP &&
+					len >= sizeof(struct rte_udp_hdr))
 					m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
+				if (l4_proto == IPPROTO_SCTP &&
+					len >= sizeof(struct rte_sctp_hdr))
 					m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
@@ -1904,12 +1933,20 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
+			if (l4_proto != IPPROTO_TCP ||
+					len <= sizeof(struct rte_tcp_hdr))
+				break;
 			tcp_hdr = l4_hdr;
+			if (len <= (tcp_hdr->data_off & 0xf0) >> 2)
+				break;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP ||
+					len <= sizeof(struct rte_udp_hdr))
+				break;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
-- 
2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] vhost: add header check in dequeue offload
  2021-03-15 15:32 ` [dpdk-dev] [PATCH v2] " Xiao Wang
@ 2021-03-15 16:17   ` David Marchand
  2021-03-15 18:53     ` Ananyev, Konstantin
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2021-03-15 16:17 UTC (permalink / raw)
  To: Xiao Wang
  Cc: Xia, Chenbo, Maxime Coquelin, Marvin Liu, dev, Ananyev,
	Konstantin, dpdk stable

On Mon, Mar 15, 2021 at 4:52 PM Xiao Wang <xiao.w.wang@intel.com> wrote:
>
> When parsing the virtio net header and packet header for dequeue offload,
> we need to perform sanity check on the packet header to ensure:
>   - No out-of-boundary memory access.
>   - The packet header and virtio_net header are valid and aligned.
>
> Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> Cc: stable@dpdk.org
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> ---
> v2:
> Allow empty L4 payload for cksum offload.
> ---
>  lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
>  1 file changed, 43 insertions(+), 6 deletions(-)
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 583bf379c6..53a8ff2898 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
>         return false;
>  }
>
> -static void
> -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> +static int
> +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> +               uint16_t *len)
>  {
>         struct rte_ipv4_hdr *ipv4_hdr;
>         struct rte_ipv6_hdr *ipv6_hdr;
>         void *l3_hdr = NULL;
>         struct rte_ether_hdr *eth_hdr;
>         uint16_t ethertype;
> +       uint16_t data_len = m->data_len;

>
>         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
>
> +       if (data_len <= sizeof(struct rte_ether_hdr))
> +               return -EINVAL;

On principle, the check should happen before calling rte_pktmbuf_mtod,
like what rte_pktmbuf_read does.

Looking at the rest of the patch, does this helper function only
handle mono segment mbufs?
My reading of copy_desc_to_mbuf() was that it could generate multi
segments mbufs...


[snip]

>         case RTE_ETHER_TYPE_IPV4:
> +               if (data_len <= sizeof(struct rte_ipv4_hdr))
> +                       return -EINVAL;
>                 ipv4_hdr = l3_hdr;
>                 *l4_proto = ipv4_hdr->next_proto_id;
>                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> +               if (data_len <= m->l3_len) {
> +                       m->l3_len = 0;
> +                       return -EINVAL;
> +               }

... so here, comparing l3 length to only the first segment length
(data_len) would be invalid.

If this helper must deal with multi segments, why not use rte_pktmbuf_read?
This function returns access to mbuf data after checking offset and
length are contiguous, else copy the needed data in a passed buffer.
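For illustration, a minimal sketch of that approach inside the parsing helper
(the IPv4 header is just an example here, and the variable names are mine, not
taken from the patch):

	struct rte_ipv4_hdr ip4_copy;
	const struct rte_ipv4_hdr *ipv4_hdr;

	/* Safe even if the header spans segments: rte_pktmbuf_read() returns
	 * a direct pointer when the bytes are contiguous, otherwise it copies
	 * them into ip4_copy; NULL means the packet is too short. */
	ipv4_hdr = rte_pktmbuf_read(m, m->l2_len, sizeof(*ipv4_hdr), &ip4_copy);
	if (ipv4_hdr == NULL)
		return -EINVAL;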


>                 *l4_hdr = (char *)l3_hdr + m->l3_len;
>                 m->ol_flags |= PKT_TX_IPV4;
> +               data_len -= m->l3_len;
>                 break;


-- 
David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] vhost: add header check in dequeue offload
  2021-03-15 16:17   ` [dpdk-dev] [dpdk-stable] " David Marchand
@ 2021-03-15 18:53     ` Ananyev, Konstantin
  2021-03-16  9:13       ` Wang, Xiao W
  0 siblings, 1 reply; 20+ messages in thread
From: Ananyev, Konstantin @ 2021-03-15 18:53 UTC (permalink / raw)
  To: David Marchand, Wang, Xiao W
  Cc: Xia, Chenbo, Maxime Coquelin, Liu, Yong, dev, dpdk stable



> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Monday, March 15, 2021 4:17 PM
> To: Wang, Xiao W <xiao.w.wang@intel.com>
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; Maxime Coquelin <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> <dev@dpdk.org>; Ananyev, Konstantin <konstantin.ananyev@intel.com>; dpdk stable <stable@dpdk.org>
> Subject: Re: [dpdk-stable] [PATCH v2] vhost: add header check in dequeue offload
> 
> On Mon, Mar 15, 2021 at 4:52 PM Xiao Wang <xiao.w.wang@intel.com> wrote:
> >
> > When parsing the virtio net header and packet header for dequeue offload,
> > we need to perform sanity check on the packet header to ensure:
> >   - No out-of-boundary memory access.
> >   - The packet header and virtio_net header are valid and aligned.
> >
> > Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > ---
> > v2:
> > Allow empty L4 payload for cksum offload.
> > ---
> >  lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
> >  1 file changed, 43 insertions(+), 6 deletions(-)
> >
> > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> > index 583bf379c6..53a8ff2898 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
> >         return false;
> >  }
> >
> > -static void
> > -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> > +static int
> > +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> > +               uint16_t *len)
> >  {
> >         struct rte_ipv4_hdr *ipv4_hdr;
> >         struct rte_ipv6_hdr *ipv6_hdr;
> >         void *l3_hdr = NULL;
> >         struct rte_ether_hdr *eth_hdr;
> >         uint16_t ethertype;
> > +       uint16_t data_len = m->data_len;
> 
> >
> >         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> >
> > +       if (data_len <= sizeof(struct rte_ether_hdr))
> > +               return -EINVAL;
> 
> On principle, the check should happen before calling rte_pktmbuf_mtod,
> like what rte_pktmbuf_read does.
> 
> Looking at the rest of the patch, does this helper function only
> handle mono segment mbufs?
> My reading of copy_desc_to_mbuf() was that it could generate multi
> segments mbufs...
> 
> 
> [snip]
> 
> >         case RTE_ETHER_TYPE_IPV4:
> > +               if (data_len <= sizeof(struct rte_ipv4_hdr))
> > +                       return -EINVAL;
> >                 ipv4_hdr = l3_hdr;
> >                 *l4_proto = ipv4_hdr->next_proto_id;
> >                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> > +               if (data_len <= m->l3_len) {
> > +                       m->l3_len = 0;
> > +                       return -EINVAL;
> > +               }
> 
> ... so here, comparing l3 length to only the first segment length
> (data_len) would be invalid.
> 
> If this helper must deal with multi segments, why not use rte_pktmbuf_read?
> This function returns access to mbuf data after checking offset and
> length are contiguous, else copy the needed data in a passed buffer.

From my understanding, yes, multi-seg is allowed, but the expectation
is that at least the packet headers (l2/l3/l4?) will always reside in the first segment.

> 
> 
> >                 *l4_hdr = (char *)l3_hdr + m->l3_len;
> >                 m->ol_flags |= PKT_TX_IPV4;
> > +               data_len -= m->l3_len;
> >                 break;
> 
> 
> --
> David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] vhost: add header check in dequeue offload
  2021-03-15 18:53     ` Ananyev, Konstantin
@ 2021-03-16  9:13       ` Wang, Xiao W
  0 siblings, 0 replies; 20+ messages in thread
From: Wang, Xiao W @ 2021-03-16  9:13 UTC (permalink / raw)
  To: Ananyev, Konstantin, David Marchand
  Cc: Xia, Chenbo, Maxime Coquelin, Liu, Yong, dev, dpdk stable

Hi,

Comments inline.

BRs,
Xiao

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Tuesday, March 16, 2021 2:53 AM
> To: David Marchand <david.marchand@redhat.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> <dev@dpdk.org>; dpdk stable <stable@dpdk.org>
> Subject: RE: [dpdk-stable] [PATCH v2] vhost: add header check in dequeue
> offload
> 
> 
> 
> > -----Original Message-----
> > From: David Marchand <david.marchand@redhat.com>
> > Sent: Monday, March 15, 2021 4:17 PM
> > To: Wang, Xiao W <xiao.w.wang@intel.com>
> > Cc: Xia, Chenbo <chenbo.xia@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> > <dev@dpdk.org>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> dpdk stable <stable@dpdk.org>
> > Subject: Re: [dpdk-stable] [PATCH v2] vhost: add header check in dequeue
> offload
> >
> > On Mon, Mar 15, 2021 at 4:52 PM Xiao Wang <xiao.w.wang@intel.com>
> wrote:
> > >
> > > When parsing the virtio net header and packet header for dequeue
> offload,
> > > we need to perform sanity check on the packet header to ensure:
> > >   - No out-of-boundary memory access.
> > >   - The packet header and virtio_net header are valid and aligned.
> > >
> > > Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > > ---
> > > v2:
> > > Allow empty L4 payload for cksum offload.
> > > ---
> > >  lib/librte_vhost/virtio_net.c | 49
> +++++++++++++++++++++++++++++++++++++------
> > >  1 file changed, 43 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> > > index 583bf379c6..53a8ff2898 100644
> > > --- a/lib/librte_vhost/virtio_net.c
> > > +++ b/lib/librte_vhost/virtio_net.c
> > > @@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct
> virtio_net *dev)
> > >         return false;
> > >  }
> > >
> > > -static void
> > > -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void
> **l4_hdr)
> > > +static int
> > > +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void
> **l4_hdr,
> > > +               uint16_t *len)
> > >  {
> > >         struct rte_ipv4_hdr *ipv4_hdr;
> > >         struct rte_ipv6_hdr *ipv6_hdr;
> > >         void *l3_hdr = NULL;
> > >         struct rte_ether_hdr *eth_hdr;
> > >         uint16_t ethertype;
> > > +       uint16_t data_len = m->data_len;
> >
> > >
> > >         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
> > >
> > > +       if (data_len <= sizeof(struct rte_ether_hdr))
> > > +               return -EINVAL;
> >
> > On principle, the check should happen before calling rte_pktmbuf_mtod,
> > like what rte_pktmbuf_read does.

Yes, I agree. Will fix it in v3.

> >
> > Looking at the rest of the patch, does this helper function only
> > handle mono segment mbufs?
> > My reading of copy_desc_to_mbuf() was that it could generate multi
> > segments mbufs...

copy_desc_to_mbuf() can generate multi-segment mbufs, and the whole packet is copied into these mbufs when the packet size is larger than a single mbuf's capacity.
In any case, one mbuf's capacity is big enough to hold the L2/L3/L4 headers.

> >
> >
> > [snip]
> >
> > >         case RTE_ETHER_TYPE_IPV4:
> > > +               if (data_len <= sizeof(struct rte_ipv4_hdr))
> > > +                       return -EINVAL;
> > >                 ipv4_hdr = l3_hdr;
> > >                 *l4_proto = ipv4_hdr->next_proto_id;
> > >                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> > > +               if (data_len <= m->l3_len) {
> > > +                       m->l3_len = 0;
> > > +                       return -EINVAL;
> > > +               }
> >
> > ... so here, comparing l3 length to only the first segment length
> > (data_len) would be invalid.
> >
> > If this helper must deal with multi segments, why not use
> rte_pktmbuf_read?
> > This function returns access to mbuf data after checking offset and
> > length are contiguous, else copy the needed data in a passed buffer.
> 
> From my understanding, yes, multi-seg is allowed, but the expectation
> is that at least the packet headers (l2/l3/l4?) will always reside in the first segment.

Yeah, I think so.

Thanks for all the comments,
-Xiao

> 
> >
> >
> > >                 *l4_hdr = (char *)l3_hdr + m->l3_len;
> > >                 m->ol_flags |= PKT_TX_IPV4;
> > > +               data_len -= m->l3_len;
> > >                 break;
> >
> >
> > --
> > David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-03-11  6:38 [dpdk-dev] [PATCH] vhost: add header check in dequeue offload Xiao Wang
  2021-03-11 10:38 ` Ananyev, Konstantin
  2021-03-15 15:32 ` [dpdk-dev] [PATCH v2] " Xiao Wang
@ 2021-03-17  6:31 ` Xiao Wang
  2021-04-01 12:04   ` David Marchand
                     ` (2 more replies)
  2 siblings, 3 replies; 20+ messages in thread
From: Xiao Wang @ 2021-03-17  6:31 UTC (permalink / raw)
  To: chenbo.xia, maxime.coquelin
  Cc: yong.liu, dev, konstantin.ananyev, david.marchand, Xiao Wang, stable

When parsing the virtio net header and packet header for dequeue offload,
we need to perform sanity check on the packet header to ensure:
  - No out-of-boundary memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v3:
- Check data_len before calling rte_pktmbuf_mtod. (David)

v2:
- Allow empty L4 payload for cksum offload. (Konstantin)
---
 lib/librte_vhost/virtio_net.c | 49 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 6 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c6..f8712f5417 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1821,44 +1821,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
+		uint16_t *len)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = m->data_len;
+
+	if (data_len <= sizeof(struct rte_ether_hdr))
+		return -EINVAL;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
 	m->l2_len = sizeof(struct rte_ether_hdr);
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+	data_len -= sizeof(struct rte_ether_hdr);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len <= sizeof(struct rte_vlan_hdr))
+			return -EINVAL;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		m->l2_len += sizeof(struct rte_vlan_hdr);
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		data_len -= sizeof(struct rte_vlan_hdr);
 	}
 
 	l3_hdr = (char *)eth_hdr + m->l2_len;
 
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
+		if (data_len <= sizeof(struct rte_ipv4_hdr))
+			return -EINVAL;
 		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+		if (data_len <= m->l3_len) {
+			m->l3_len = 0;
+			return -EINVAL;
+		}
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
+		data_len -= m->l3_len;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
+		if (data_len <= sizeof(struct rte_ipv6_hdr))
+			return -EINVAL;
 		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		data_len -= m->l3_len;
 		break;
 	default:
 		m->l3_len = 0;
@@ -1866,6 +1886,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		*l4_hdr = NULL;
 		break;
 	}
+
+	*len = data_len;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -1874,24 +1897,30 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	uint16_t l4_proto = 0;
 	void *l4_hdr = NULL;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t len = 0;
 
 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
 		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
+	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
+		return;
+
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
+				if (l4_proto == IPPROTO_TCP &&
+					len >= sizeof(struct rte_tcp_hdr))
 					m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
+				if (l4_proto == IPPROTO_UDP &&
+					len >= sizeof(struct rte_udp_hdr))
 					m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
+				if (l4_proto == IPPROTO_SCTP &&
+					len >= sizeof(struct rte_sctp_hdr))
 					m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
@@ -1904,12 +1933,20 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
+			if (l4_proto != IPPROTO_TCP ||
+					len <= sizeof(struct rte_tcp_hdr))
+				break;
 			tcp_hdr = l4_hdr;
+			if (len <= (tcp_hdr->data_off & 0xf0) >> 2)
+				break;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP ||
+					len <= sizeof(struct rte_udp_hdr))
+				break;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
-- 
2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
@ 2021-04-01 12:04   ` David Marchand
  2021-04-02  8:38     ` Wang, Xiao W
  2021-06-15  6:35   ` [dpdk-dev] [PATCH v4] vhost: check header for legacy " Xiao Wang
  2021-06-21  8:21   ` [dpdk-dev] [PATCH v5] " Xiao Wang
  2 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2021-04-01 12:04 UTC (permalink / raw)
  To: Xiao Wang
  Cc: Xia, Chenbo, Maxime Coquelin, Marvin Liu, dev, Ananyev,
	Konstantin, dpdk stable

On Wed, Mar 17, 2021 at 7:50 AM Xiao Wang <xiao.w.wang@intel.com> wrote:
>
> When parsing the virtio net header and packet header for dequeue offload,
> we need to perform sanity check on the packet header to ensure:
>   - No out-of-boundary memory access.
>   - The packet header and virtio_net header are valid and aligned.
>
> Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> Cc: stable@dpdk.org
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>

I spent some time digging on this topic.

Afaiu the offload API, vhost is not supposed to populate tx offloads.
I would drop this whole parse_ethernet function and replace
vhost_dequeue_offload with what virtio does on the rx side.

Please have a look at this series (especially the last patch):
http://patchwork.dpdk.org/project/dpdk/list/?series=16052


Thanks.

-- 
David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-04-01 12:04   ` David Marchand
@ 2021-04-02  8:38     ` Wang, Xiao W
  2021-04-12  9:08       ` Wang, Xiao W
  0 siblings, 1 reply; 20+ messages in thread
From: Wang, Xiao W @ 2021-04-02  8:38 UTC (permalink / raw)
  To: David Marchand
  Cc: Xia, Chenbo, Maxime Coquelin, Liu, Yong, dev, Ananyev,
	Konstantin, dpdk stable, yangyi01


> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Thursday, April 1, 2021 8:04 PM
> To: Wang, Xiao W <xiao.w.wang@intel.com>
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> <dev@dpdk.org>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> dpdk stable <stable@dpdk.org>
> Subject: Re: [PATCH v3] vhost: add header check in dequeue offload
> 
> On Wed, Mar 17, 2021 at 7:50 AM Xiao Wang <xiao.w.wang@intel.com>
> wrote:
> >
> > When parsing the virtio net header and packet header for dequeue offload,
> > we need to perform sanity check on the packet header to ensure:
> >   - No out-of-boundary memory access.
> >   - The packet header and virtio_net header are valid and aligned.
> >
> > Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> 
> I spent some time digging on this topic.
> 
> Afaiu the offload API, vhost is not supposed to populate tx offloads.
> I would drop this whole parse_ethernet function and replace
> vhost_dequeue_offload with what virtio does on the rx side.
> 
> Please have a look at this series (especially the last patch):
> http://patchwork.dpdk.org/project/dpdk/list/?series=16052
> 
> 
> Thanks.
> 
> --
> David Marchand

+Yang, Yi into this loop, who may have comments especially from the OVS perspective on CKSUM/TSO/TSO-in-tunnel/etc.

I think the original vhost implementation here is meant to pass virtio's offload request on to the next output port, either a physical device or a virtio device.
If we go with series http://patchwork.dpdk.org/project/dpdk/list/?series=16052, then the virtual switch needs to do an extra translation on the flags:
e.g. PKT_RX_LRO --> PKT_TX_TCP_SEG. The question is that a packet marked with PKT_RX_LRO may come from different types of ports (non-vhost), so how can the vSwitch tell whether a TSO request should be set for this packet at transmission?

If I think from an endpoint app's perspective, I'm inclined to agree with your series. If I think from a switch/router's perspective, I'm inclined to keep the current implementation. Maybe we can add the PKT_RX_L4_CKSUM_NONE/PKT_RX_LRO flags into the current implementation; this method seems to cover both scenarios.
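As a rough illustration only (this is not an existing OVS or DPDK code path,
just the kind of translation I mean):

	/* At transmission time, turn the Rx-side report back into a Tx request. */
	static void
	translate_rx_to_tx_offloads(struct rte_mbuf *m)
	{
		if (m->ol_flags & PKT_RX_LRO) {
			/* PKT_RX_LRO alone does not tell which segment size to
			 * use, so tso_segsz would have to be carried separately. */
			m->ol_flags |= PKT_TX_TCP_SEG;
		}
	}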

BRs,
Xiao




^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-04-02  8:38     ` Wang, Xiao W
@ 2021-04-12  9:08       ` Wang, Xiao W
  2021-04-12  9:33         ` David Marchand
  0 siblings, 1 reply; 20+ messages in thread
From: Wang, Xiao W @ 2021-04-12  9:08 UTC (permalink / raw)
  To: David Marchand, Maxime Coquelin, Xia, Chenbo
  Cc: Liu, Yong, dev, Ananyev, Konstantin, dpdk stable, yangyi01

Hi,

> -----Original Message-----
> From: Wang, Xiao W
> Sent: Friday, April 2, 2021 4:39 PM
> To: David Marchand <david.marchand@redhat.com>
> Cc: Xia, Chenbo <Chenbo.Xia@intel.com>; Maxime Coquelin
> <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> <dev@dpdk.org>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> dpdk stable <stable@dpdk.org>; yangyi01@inspur.com
> Subject: RE: [PATCH v3] vhost: add header check in dequeue offload
> 
> 
> > -----Original Message-----
> > From: David Marchand <david.marchand@redhat.com>
> > Sent: Thursday, April 1, 2021 8:04 PM
> > To: Wang, Xiao W <xiao.w.wang@intel.com>
> > Cc: Xia, Chenbo <chenbo.xia@intel.com>; Maxime Coquelin
> > <maxime.coquelin@redhat.com>; Liu, Yong <yong.liu@intel.com>; dev
> > <dev@dpdk.org>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> > dpdk stable <stable@dpdk.org>
> > Subject: Re: [PATCH v3] vhost: add header check in dequeue offload
> >
> > On Wed, Mar 17, 2021 at 7:50 AM Xiao Wang <xiao.w.wang@intel.com>
> > wrote:
> > >
> > > When parsing the virtio net header and packet header for dequeue
> offload,
> > > we need to perform sanity check on the packet header to ensure:
> > >   - No out-of-boundary memory access.
> > >   - The packet header and virtio_net header are valid and aligned.
> > >
> > > Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> >
> > I spent some time digging on this topic.
> >
> > Afaiu the offload API, vhost is not supposed to populate tx offloads.
> > I would drop this whole parse_ethernet function and replace
> > vhost_dequeue_offload with what virtio does on the rx side.
> >
> > Please have a look at this series (especially the last patch):
> > http://patchwork.dpdk.org/project/dpdk/list/?series=16052
> >
> >
> > Thanks.
> >
> > --
> > David Marchand
> 
> +Yang ,Yi into this loop who may have comments especially from OVS
> perspective on CKSUM/TSO/TSO in tunnel/etc..
> 
> I think the original vhost implementation here is to help pass virtio's offload
> request onto the next output port, either physical device or a virtio device.
> If we go with series
> http://patchwork.dpdk.org/project/dpdk/list/?series=16052, then virtual
> switch need to do an extra translation on the flags:
> e.g. PKT_RX_LRO --> PKT_TX_TCP_SEG. The question is that a packet
> marked with PKT_RX_LRO may come from different types of ports (non-
> vhost), how vSwitch can tell if TSO request should be set for this packet at
> transmission?
> 
> If I think from an endpoint app's perspective, I'm inclined to agree with your
> series. If I think from a switch/router's perspective, I'm inclined to keep the
> current implementation. Maybe we can add
> PKT_RX_L4_CKSUM_NONE/PKT_RX_LRO flags into the current
> implementation, seems this method can cover both scenarios.
> 
> BRs,
> Xiao
> 
> 

Considering that the major consumer of the vhost API is a virtual switch/router, I tend to keep the current implementation and apply this fix patch.
Any comments?

BRs,
Xiao

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-04-12  9:08       ` Wang, Xiao W
@ 2021-04-12  9:33         ` David Marchand
  2021-04-13 14:30           ` Maxime Coquelin
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2021-04-12  9:33 UTC (permalink / raw)
  To: Wang, Xiao W
  Cc: Maxime Coquelin, Xia, Chenbo, Liu, Yong, dev, Ananyev,
	Konstantin, dpdk stable, yangyi01

On Mon, Apr 12, 2021 at 11:09 AM Wang, Xiao W <xiao.w.wang@intel.com> wrote:
> Considering the major consumer of vhost API is virtual switch/router, I tend to keep the current implementation and apply this fix patch.
> Any comments?

This is just a hack that bypasses the vswitch control.

It happens to work when the vswitch does nothing.
If anything is done, like popping a vlan header, the vswitch needs to
update l3 offset.
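For example (illustrative only), after the vswitch strips a VLAN tag, the
metadata vhost filled in is stale unless something like this fix-up is done:

	if (rte_vlan_strip(m) == 0)
		m->l2_len -= sizeof(struct rte_vlan_hdr);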


-- 
David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-04-12  9:33         ` David Marchand
@ 2021-04-13 14:30           ` Maxime Coquelin
  2021-05-08  5:54             ` Wang, Xiao W
  0 siblings, 1 reply; 20+ messages in thread
From: Maxime Coquelin @ 2021-04-13 14:30 UTC (permalink / raw)
  To: David Marchand, Wang, Xiao W
  Cc: Xia, Chenbo, Liu, Yong, dev, Ananyev, Konstantin, dpdk stable, yangyi01



On 4/12/21 11:33 AM, David Marchand wrote:
> On Mon, Apr 12, 2021 at 11:09 AM Wang, Xiao W <xiao.w.wang@intel.com> wrote:
>> Considering the major consumer of vhost API is virtual switch/router, I tend to keep the current implementation and apply this fix patch.
>> Any comments?
> 
> This is just a hack that bypasses the vswitch control.
> 
> It happens to work when the vswitch does nothing.
> If anything is done, like popping a vlan header, the vswitch needs to
> update l3 offset.
> 
> 

I agree with David, current behavior is wrong.

Furthermore, when the lib is used via the Vhost PMD, the application
should not have to handle it differently depending on whether it is the
Vhost PMD or any physical NIC PMD.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add header check in dequeue offload
  2021-04-13 14:30           ` Maxime Coquelin
@ 2021-05-08  5:54             ` Wang, Xiao W
  0 siblings, 0 replies; 20+ messages in thread
From: Wang, Xiao W @ 2021-05-08  5:54 UTC (permalink / raw)
  To: Maxime Coquelin, David Marchand
  Cc: Xia, Chenbo, Liu, Yong, dev, Ananyev, Konstantin, dpdk stable, yangyi01

Hi Maxime and David,

I see the patch "vhost: fix offload flags in Rx path" http://patches.dpdk.org/project/dpdk/patch/20210503164344.27916-4-david.marchand@redhat.com/ has been merged, and the legacy implementation is kept. Do you think we still need to fix the header check for the legacy implementation?

BRs,
Xiao

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, April 13, 2021 10:31 PM
> To: David Marchand <david.marchand@redhat.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; Liu, Yong <yong.liu@intel.com>;
> dev <dev@dpdk.org>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; dpdk stable <stable@dpdk.org>;
> yangyi01@inspur.com
> Subject: Re: [PATCH v3] vhost: add header check in dequeue offload
> 
> 
> 
> On 4/12/21 11:33 AM, David Marchand wrote:
> > On Mon, Apr 12, 2021 at 11:09 AM Wang, Xiao W
> <xiao.w.wang@intel.com> wrote:
> >> Considering the major consumer of vhost API is virtual switch/router, I
> tend to keep the current implementation and apply this fix patch.
> >> Any comments?
> >
> > This is just a hack that bypasses the vswitch control.
> >
> > It happens to work when the vswitch does nothing.
> > If anything is done, like popping a vlan header, the vswitch needs to
> > update l3 offset.
> >
> >
> 
> I agree with David, current behavior is wrong.
> 
> Furthermore, when the lib is used via the Vhost PMD, the application
> should not have to handle it differently on whether it is Vhost PMD or
> any physical NIC PMD.


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [dpdk-dev] [PATCH v4] vhost: check header for legacy dequeue offload
  2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
  2021-04-01 12:04   ` David Marchand
@ 2021-06-15  6:35   ` Xiao Wang
  2021-06-15  7:57     ` David Marchand
  2021-06-21  8:21   ` [dpdk-dev] [PATCH v5] " Xiao Wang
  2 siblings, 1 reply; 20+ messages in thread
From: Xiao Wang @ 2021-06-15  6:35 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia; +Cc: cheng1.jiang, dev, Xiao Wang, stable

When parsing the virtio net header and packet header for dequeue offload,
we need to perform sanity check on the packet header to ensure:
  - No out-of-boundary memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v4:
- Rebase on head of main branch.
- Allow empty L4 payload in GSO.

v3:
- Check data_len before calling rte_pktmbuf_mtod. (David)

v2:
- Allow empty L4 payload for cksum offload. (Konstantin)
---
 lib/vhost/virtio_net.c | 52 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 7 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..351ff0a841 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2259,44 +2259,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
+		uint16_t *len)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
 	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = m->data_len;
+
+	if (data_len <= sizeof(struct rte_ether_hdr))
+		return -EINVAL;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
 	m->l2_len = sizeof(struct rte_ether_hdr);
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
+	data_len -= sizeof(struct rte_ether_hdr);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len <= sizeof(struct rte_vlan_hdr))
+			return -EINVAL;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
 		m->l2_len += sizeof(struct rte_vlan_hdr);
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
+		data_len -= sizeof(struct rte_vlan_hdr);
 	}
 
 	l3_hdr = (char *)eth_hdr + m->l2_len;
 
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
+		if (data_len <= sizeof(struct rte_ipv4_hdr))
+			return -EINVAL;
 		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
+		if (data_len <= m->l3_len) {
+			m->l3_len = 0;
+			return -EINVAL;
+		}
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
+		data_len -= m->l3_len;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
+		if (data_len <= sizeof(struct rte_ipv6_hdr))
+			return -EINVAL;
 		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		data_len -= m->l3_len;
 		break;
 	default:
 		m->l3_len = 0;
@@ -2304,6 +2324,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		*l4_hdr = NULL;
 		break;
 	}
+
+	*len = data_len;
+	return 0;
 }
 
 static __rte_always_inline void
@@ -2312,21 +2335,27 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	uint16_t l4_proto = 0;
 	void *l4_hdr = NULL;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t len = 0, tcp_len;
+
+	if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
+		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
+				if (l4_proto == IPPROTO_TCP &&
+					len >= sizeof(struct rte_tcp_hdr))
 					m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
+				if (l4_proto == IPPROTO_UDP &&
+					len >= sizeof(struct rte_udp_hdr))
 					m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
+				if (l4_proto == IPPROTO_SCTP &&
+					len >= sizeof(struct rte_sctp_hdr))
 					m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
@@ -2339,12 +2368,21 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
+			if (l4_proto != IPPROTO_TCP ||
+				len < sizeof(struct rte_tcp_hdr))
+				break;
 			tcp_hdr = l4_hdr;
+			tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			if (len < tcp_len)
+				break;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			m->l4_len = tcp_len;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP ||
+				len < sizeof(struct rte_udp_hdr))
+				break;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
-- 
2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v4] vhost: check header for legacy dequeue offload
  2021-06-15  6:35   ` [dpdk-dev] [PATCH v4] vhost: check header for legacy " Xiao Wang
@ 2021-06-15  7:57     ` David Marchand
  2021-06-16 14:33       ` Wang, Xiao W
  0 siblings, 1 reply; 20+ messages in thread
From: David Marchand @ 2021-06-15  7:57 UTC (permalink / raw)
  To: Xiao Wang; +Cc: Maxime Coquelin, Xia, Chenbo, Cheng Jiang, dev, dpdk stable

On Tue, Jun 15, 2021 at 9:06 AM Xiao Wang <xiao.w.wang@intel.com> wrote:
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8da8a86a10..351ff0a841 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2259,44 +2259,64 @@ virtio_net_with_host_offload(struct virtio_net *dev)
>         return false;
>  }
>
> -static void
> -parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
> +static int
> +parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr,
> +               uint16_t *len)
>  {


This function name is misleading; a better name would be parse_headers().
Its semantics get more and more confusing with those l4_hdr and len pointers.

This function fills ->lX_len in the mbuf, so everything is available to the caller.

Caller can check that rte_pktmbuf_data_len() is >= m->l2_len +
m->l3_len + somesize.
=> no need for len.

l4_hdr can simply be deduced with rte_pktmbuf_mtod_offset(m, struct
somestruct *, m->l2_len + m->l3_len).
=> no need for l4_hdr.
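
For illustration, a minimal sketch of the caller-side pattern suggested
here (a fragment only, assuming m, l4_proto and tcp_hdr are declared as
in the current code):

	if (parse_headers(m, &l4_proto) < 0)
		return;

	/* check the first segment length against the L4 header we need */
	if (rte_pktmbuf_data_len(m) < m->l2_len + m->l3_len +
			sizeof(struct rte_tcp_hdr))
		return;

	/* derive the L4 pointer from the offsets already set in the mbuf */
	tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
			m->l2_len + m->l3_len);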


>         struct rte_ipv4_hdr *ipv4_hdr;
>         struct rte_ipv6_hdr *ipv6_hdr;
>         void *l3_hdr = NULL;

No need for l3_hdr.


>         struct rte_ether_hdr *eth_hdr;
>         uint16_t ethertype;
> +       uint16_t data_len = m->data_len;

Avoid direct access to mbuf internals; we have inline helpers such as
rte_pktmbuf_data_len(m).


> +
> +       if (data_len <= sizeof(struct rte_ether_hdr))

Strictly speaking, < is enough.


> +               return -EINVAL;
>
>         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
>
>         m->l2_len = sizeof(struct rte_ether_hdr);
>         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
> +       data_len -= sizeof(struct rte_ether_hdr);

No need to decrement data_len if checks below are all done for absolute value.
See suggestions below.


>
>         if (ethertype == RTE_ETHER_TYPE_VLAN) {
> +               if (data_len <= sizeof(struct rte_vlan_hdr))
> +                       return -EINVAL;

if (data_len < sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr))


> +
>                 struct rte_vlan_hdr *vlan_hdr =
>                         (struct rte_vlan_hdr *)(eth_hdr + 1);
>
>                 m->l2_len += sizeof(struct rte_vlan_hdr);
>                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
> +               data_len -= sizeof(struct rte_vlan_hdr);

Idem.


>         }
>
>         l3_hdr = (char *)eth_hdr + m->l2_len;
>
>         switch (ethertype) {
>         case RTE_ETHER_TYPE_IPV4:
> +               if (data_len <= sizeof(struct rte_ipv4_hdr))
> +                       return -EINVAL;

if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))


>                 ipv4_hdr = l3_hdr;

ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len);


>                 *l4_proto = ipv4_hdr->next_proto_id;
>                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
> +               if (data_len <= m->l3_len) {

if (data_len < m->l2_len + m->l3_len)


> +                       m->l3_len = 0;
> +                       return -EINVAL;

Returning here leaves m->l2_len set.


> +               }
>                 *l4_hdr = (char *)l3_hdr + m->l3_len;
>                 m->ol_flags |= PKT_TX_IPV4;
> +               data_len -= m->l3_len;
>                 break;
>         case RTE_ETHER_TYPE_IPV6:
> +               if (data_len <= sizeof(struct rte_ipv6_hdr))
> +                       return -EINVAL;

if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
Returning here leaves m->l2_len set.


>                 ipv6_hdr = l3_hdr;

ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->l2_len);


>                 *l4_proto = ipv6_hdr->proto;
>                 m->l3_len = sizeof(struct rte_ipv6_hdr);
>                 *l4_hdr = (char *)l3_hdr + m->l3_len;
>                 m->ol_flags |= PKT_TX_IPV6;
> +               data_len -= m->l3_len;
>                 break;
>         default:
>                 m->l3_len = 0;
> @@ -2304,6 +2324,9 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
>                 *l4_hdr = NULL;
>                 break;
>         }
> +
> +       *len = data_len;
> +       return 0;
>  }
>
>  static __rte_always_inline void
> @@ -2312,21 +2335,27 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>         uint16_t l4_proto = 0;
>         void *l4_hdr = NULL;
>         struct rte_tcp_hdr *tcp_hdr = NULL;
> +       uint16_t len = 0, tcp_len;
> +
> +       if (parse_ethernet(m, &l4_proto, &l4_hdr, &len) < 0)
> +               return;
>
> -       parse_ethernet(m, &l4_proto, &l4_hdr);
>         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
>                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
>                         switch (hdr->csum_offset) {
>                         case (offsetof(struct rte_tcp_hdr, cksum)):
> -                               if (l4_proto == IPPROTO_TCP)
> +                               if (l4_proto == IPPROTO_TCP &&
> +                                       len >= sizeof(struct rte_tcp_hdr))

if (rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + sizeof(struct
rte_tcp_hdr))
Then, if this check fails, we leave l2_len, l3_len and the PKT_TX_IPVx
flag set in the mbuf.

These two comments apply to other updates below.

>                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
>                                 break;
>                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
> -                               if (l4_proto == IPPROTO_UDP)
> +                               if (l4_proto == IPPROTO_UDP &&
> +                                       len >= sizeof(struct rte_udp_hdr))
>                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
>                                 break;
>                         case (offsetof(struct rte_sctp_hdr, cksum)):
> -                               if (l4_proto == IPPROTO_SCTP)
> +                               if (l4_proto == IPPROTO_SCTP &&
> +                                       len >= sizeof(struct rte_sctp_hdr))
>                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
>                                 break;
>                         default:
> @@ -2339,12 +2368,21 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
>                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
>                 case VIRTIO_NET_HDR_GSO_TCPV4:
>                 case VIRTIO_NET_HDR_GSO_TCPV6:
> +                       if (l4_proto != IPPROTO_TCP ||
> +                               len < sizeof(struct rte_tcp_hdr))
> +                               break;
>                         tcp_hdr = l4_hdr;

tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *, m->l2_len +
m->l3_len);



> +                       tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
> +                       if (len < tcp_len)
> +                               break;
>                         m->ol_flags |= PKT_TX_TCP_SEG;
>                         m->tso_segsz = hdr->gso_size;
> -                       m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
> +                       m->l4_len = tcp_len;
>                         break;
>                 case VIRTIO_NET_HDR_GSO_UDP:
> +                       if (l4_proto != IPPROTO_UDP ||
> +                               len < sizeof(struct rte_udp_hdr))
> +                               break;
>                         m->ol_flags |= PKT_TX_UDP_SEG;
>                         m->tso_segsz = hdr->gso_size;
>                         m->l4_len = sizeof(struct rte_udp_hdr);
> --
> 2.15.1
>


-- 
David Marchand


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v4] vhost: check header for legacy dequeue offload
  2021-06-15  7:57     ` David Marchand
@ 2021-06-16 14:33       ` Wang, Xiao W
  0 siblings, 0 replies; 20+ messages in thread
From: Wang, Xiao W @ 2021-06-16 14:33 UTC (permalink / raw)
  To: David Marchand
  Cc: Maxime Coquelin, Xia, Chenbo, Jiang, Cheng1, dev, dpdk stable

Hi David,

Thanks for your comments.
I agree with your suggestions. BTW, I noticed some other invalid corner cases which require rolling back mbuf->l2_len, l3_len and ol_flags.
E.g. the default case in the "switch {}" context is not valid.
Also, the l4_proto variable is better declared as uint8_t rather than uint16_t.

I will prepare a new version.
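
Roughly, all the invalid cases would jump to a common error label that
rolls the mbuf back to a clean state, something like this sketch (the
actual v5 patch may differ in details):

error:
	/* undo the fields already set during parsing */
	m->l2_len = 0;
	m->l3_len = 0;
	m->ol_flags = 0;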

BRs,
Xiao


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [dpdk-dev] [PATCH v5] vhost: check header for legacy dequeue offload
  2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
  2021-04-01 12:04   ` David Marchand
  2021-06-15  6:35   ` [dpdk-dev] [PATCH v4] vhost: check header for legacy " Xiao Wang
@ 2021-06-21  8:21   ` Xiao Wang
  2021-07-13  9:34     ` Maxime Coquelin
  2021-07-20  2:43     ` Xia, Chenbo
  2 siblings, 2 replies; 20+ messages in thread
From: Xiao Wang @ 2021-06-21  8:21 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia, david.marchand
  Cc: cheng1.jiang, dev, Xiao Wang, stable

When parsing the virtio net header and packet header for dequeue offload,
we need to perform sanity check on the packet header to ensure:
  - No out-of-boundary memory access.
  - The packet header and virtio_net header are valid and aligned.

Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
Cc: stable@dpdk.org

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
v5:
- Redefine the function parse_ethernet() to parse_headers(). (David)
- Use mbuf helpers e.g. rte_pktmbuf_data_len() and rte_pktmbuf_mtod_offset(). (David)
- Reset mbuf l2_len, l3_len and ol_flags when detecting anything invalid. (David)
- Improve some check conditions. (David)
- Move the data_len check for L4 header into parse_headers(), in order to avoid
  duplicated checks in CSUM and GSO.
- Use uint8_t instead of uint16_t for l4_proto variable.
- Detect more invalid corner cases.

v4:
- Rebase on head of main branch.
- Allow empty L4 payload in GSO.

v3:
- Check data_len before calling rte_pktmbuf_mtod. (David)

v2:
- Allow empty L4 payload for cksum offload. (Konstantin)
---
 lib/vhost/virtio_net.c | 117 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 89 insertions(+), 28 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..fb21b56558 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2259,14 +2259,17 @@ virtio_net_with_host_offload(struct virtio_net *dev)
 	return false;
 }
 
-static void
-parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
+static int
+parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
 {
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
-	void *l3_hdr = NULL;
 	struct rte_ether_hdr *eth_hdr;
 	uint16_t ethertype;
+	uint16_t data_len = rte_pktmbuf_data_len(m);
+
+	if (data_len < sizeof(struct rte_ether_hdr))
+		return -EINVAL;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
@@ -2274,6 +2277,10 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
 
 	if (ethertype == RTE_ETHER_TYPE_VLAN) {
+		if (data_len < sizeof(struct rte_ether_hdr) +
+				sizeof(struct rte_vlan_hdr))
+			goto error;
+
 		struct rte_vlan_hdr *vlan_hdr =
 			(struct rte_vlan_hdr *)(eth_hdr + 1);
 
@@ -2281,70 +2288,118 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
 	}
 
-	l3_hdr = (char *)eth_hdr + m->l2_len;
-
 	switch (ethertype) {
 	case RTE_ETHER_TYPE_IPV4:
-		ipv4_hdr = l3_hdr;
-		*l4_proto = ipv4_hdr->next_proto_id;
+		if (data_len < m->l2_len + sizeof(struct rte_ipv4_hdr))
+			goto error;
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+				m->l2_len);
 		m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
+		if (data_len < m->l2_len + m->l3_len)
+			goto error;
 		m->ol_flags |= PKT_TX_IPV4;
+		*l4_proto = ipv4_hdr->next_proto_id;
 		break;
 	case RTE_ETHER_TYPE_IPV6:
-		ipv6_hdr = l3_hdr;
-		*l4_proto = ipv6_hdr->proto;
+		if (data_len < m->l2_len + sizeof(struct rte_ipv6_hdr))
+			goto error;
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
+				m->l2_len);
 		m->l3_len = sizeof(struct rte_ipv6_hdr);
-		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV6;
+		*l4_proto = ipv6_hdr->proto;
 		break;
 	default:
-		m->l3_len = 0;
-		*l4_proto = 0;
-		*l4_hdr = NULL;
+		/* a valid L3 header is needed for further L4 parsing */
+		goto error;
+	}
+
+	/* both CSUM and GSO need a valid L4 header */
+	switch (*l4_proto) {
+	case IPPROTO_TCP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_tcp_hdr))
+			goto error;
+		break;
+	case IPPROTO_UDP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_udp_hdr))
+			goto error;
 		break;
+	case IPPROTO_SCTP:
+		if (data_len < m->l2_len + m->l3_len +
+				sizeof(struct rte_sctp_hdr))
+			goto error;
+		break;
+	default:
+		goto error;
 	}
+
+	return 0;
+
+error:
+	m->l2_len = 0;
+	m->l3_len = 0;
+	m->ol_flags = 0;
+	return -EINVAL;
 }
 
 static __rte_always_inline void
 vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 {
-	uint16_t l4_proto = 0;
-	void *l4_hdr = NULL;
+	uint8_t l4_proto = 0;
 	struct rte_tcp_hdr *tcp_hdr = NULL;
+	uint16_t tcp_len;
+	uint16_t data_len = rte_pktmbuf_data_len(m);
+
+	if (parse_headers(m, &l4_proto) < 0)
+		return;
 
-	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
 			switch (hdr->csum_offset) {
 			case (offsetof(struct rte_tcp_hdr, cksum)):
-				if (l4_proto == IPPROTO_TCP)
-					m->ol_flags |= PKT_TX_TCP_CKSUM;
+				if (l4_proto != IPPROTO_TCP)
+					goto error;
+				m->ol_flags |= PKT_TX_TCP_CKSUM;
 				break;
 			case (offsetof(struct rte_udp_hdr, dgram_cksum)):
-				if (l4_proto == IPPROTO_UDP)
-					m->ol_flags |= PKT_TX_UDP_CKSUM;
+				if (l4_proto != IPPROTO_UDP)
+					goto error;
+				m->ol_flags |= PKT_TX_UDP_CKSUM;
 				break;
 			case (offsetof(struct rte_sctp_hdr, cksum)):
-				if (l4_proto == IPPROTO_SCTP)
-					m->ol_flags |= PKT_TX_SCTP_CKSUM;
+				if (l4_proto != IPPROTO_SCTP)
+					goto error;
+				m->ol_flags |= PKT_TX_SCTP_CKSUM;
 				break;
 			default:
-				break;
+				goto error;
 			}
+		} else {
+			goto error;
 		}
 	}
 
-	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
-			tcp_hdr = l4_hdr;
+			if (l4_proto != IPPROTO_TCP)
+				goto error;
+			tcp_hdr = rte_pktmbuf_mtod_offset(m,
+					struct rte_tcp_hdr *,
+					m->l2_len + m->l3_len);
+			tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			if (data_len < m->l2_len + m->l3_len + tcp_len)
+				goto error;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
-			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+			m->l4_len = tcp_len;
 			break;
 		case VIRTIO_NET_HDR_GSO_UDP:
+			if (l4_proto != IPPROTO_UDP)
+				goto error;
 			m->ol_flags |= PKT_TX_UDP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = sizeof(struct rte_udp_hdr);
@@ -2352,9 +2407,15 @@ vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		default:
 			VHOST_LOG_DATA(WARNING,
 				"unsupported gso type %u.\n", hdr->gso_type);
-			break;
+			goto error;
 		}
 	}
+	return;
+
+error:
+	m->l2_len = 0;
+	m->l3_len = 0;
+	m->ol_flags = 0;
 }
 
 static __rte_always_inline void
-- 
2.15.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v5] vhost: check header for legacy dequeue offload
  2021-06-21  8:21   ` [dpdk-dev] [PATCH v5] " Xiao Wang
@ 2021-07-13  9:34     ` Maxime Coquelin
  2021-07-20  2:43     ` Xia, Chenbo
  1 sibling, 0 replies; 20+ messages in thread
From: Maxime Coquelin @ 2021-07-13  9:34 UTC (permalink / raw)
  To: Xiao Wang, chenbo.xia, david.marchand; +Cc: cheng1.jiang, dev, stable

Hi Xiao,

On 6/21/21 10:21 AM, Xiao Wang wrote:
> When parsing the virtio net header and packet header for dequeue offload,
> we need to perform sanity check on the packet header to ensure:
>   - No out-of-boundary memory access.
>   - The packet header and virtio_net header are valid and aligned.
> 
> Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> ---
> v5:
> - Redefine the function parse_ethernet() to parse_headers(). (David)
> - Use mbuf helpers e.g. rte_pktmbuf_data_len() and rte_pktmbuf_mtod_offset(). (David)
> - Reset mbuf l2_len, l3_len and ol_flags when detecting anything invalid. (David)
> - Improve some check conditions. (David)
> - Move the data_len check for L4 header into parse_headers(), in order to avoid
>   duplicated checks in CSUM and GSO.
> - Use uint8_t instead of uint16_t for l4_proto variable.
> - Detect more invalid corner cases.
> 
> v4:
> - Rebase on head of main branch.
> - Allow empty L4 payload in GSO.
> 
> v3:
> - Check data_len before calling rte_pktmbuf_mtod. (David)
> 
> v2:
> - Allow empty L4 payload for cksum offload. (Konstantin)
> ---
>  lib/vhost/virtio_net.c | 117 +++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 89 insertions(+), 28 deletions(-)
> 

Thanks for the fix, it looks good to me:

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [dpdk-dev] [PATCH v5] vhost: check header for legacy dequeue offload
  2021-06-21  8:21   ` [dpdk-dev] [PATCH v5] " Xiao Wang
  2021-07-13  9:34     ` Maxime Coquelin
@ 2021-07-20  2:43     ` Xia, Chenbo
  1 sibling, 0 replies; 20+ messages in thread
From: Xia, Chenbo @ 2021-07-20  2:43 UTC (permalink / raw)
  To: Wang, Xiao W, maxime.coquelin, david.marchand; +Cc: Jiang, Cheng1, dev, stable

> -----Original Message-----
> From: Wang, Xiao W <xiao.w.wang@intel.com>
> Sent: Monday, June 21, 2021 4:21 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com
> Cc: Jiang, Cheng1 <cheng1.jiang@intel.com>; dev@dpdk.org; Wang, Xiao W
> <xiao.w.wang@intel.com>; stable@dpdk.org
> Subject: [PATCH v5] vhost: check header for legacy dequeue offload
> 
> When parsing the virtio net header and packet header for dequeue offload,
> we need to perform sanity check on the packet header to ensure:
>   - No out-of-boundary memory access.
>   - The packet header and virtio_net header are valid and aligned.
> 
> Fixes: d0cf91303d73 ("vhost: add Tx offload capabilities")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> 2.15.1

Applied to next-virtio/main, thanks.

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2021-07-20  2:43 UTC | newest]

Thread overview: 20+ messages
2021-03-11  6:38 [dpdk-dev] [PATCH] vhost: add header check in dequeue offload Xiao Wang
2021-03-11 10:38 ` Ananyev, Konstantin
2021-03-12 15:19   ` Wang, Xiao W
2021-03-15 15:32 ` [dpdk-dev] [PATCH v2] " Xiao Wang
2021-03-15 16:17   ` [dpdk-dev] [dpdk-stable] " David Marchand
2021-03-15 18:53     ` Ananyev, Konstantin
2021-03-16  9:13       ` Wang, Xiao W
2021-03-17  6:31 ` [dpdk-dev] [PATCH v3] " Xiao Wang
2021-04-01 12:04   ` David Marchand
2021-04-02  8:38     ` Wang, Xiao W
2021-04-12  9:08       ` Wang, Xiao W
2021-04-12  9:33         ` David Marchand
2021-04-13 14:30           ` Maxime Coquelin
2021-05-08  5:54             ` Wang, Xiao W
2021-06-15  6:35   ` [dpdk-dev] [PATCH v4] vhost: check header for legacy " Xiao Wang
2021-06-15  7:57     ` David Marchand
2021-06-16 14:33       ` Wang, Xiao W
2021-06-21  8:21   ` [dpdk-dev] [PATCH v5] " Xiao Wang
2021-07-13  9:34     ` Maxime Coquelin
2021-07-20  2:43     ` Xia, Chenbo
