DPDK patches and discussions
From: Stephen Hemminger <stephen@networkplumber.org>
To: Yunjian Wang <wangyunjian@huawei.com>
Cc: <dev@dpdk.org>, <maxime.coquelin@redhat.com>,
	<chenbox@nvidia.com>, <jerry.lilijun@huawei.com>,
	<xiawei40@huawei.com>, <wangzengyuan@huawei.com>,
	<stable@dpdk.org>
Subject: Re: [PATCH v2 1/1] vhost: fix a double fetch when dequeue offloading
Date: Fri, 20 Dec 2024 09:10:52 -0800	[thread overview]
Message-ID: <20241220091052.68bb13ee@hermes.local> (raw)
In-Reply-To: <09058cfb25d7583f67d74f09cd36673f1b10f5ec.1734661755.git.wangyunjian@huawei.com>

On Fri, 20 Dec 2024 11:49:55 +0800
Yunjian Wang <wangyunjian@huawei.com> wrote:

> Reading hdr->csum_start does two successive fetches from user space to
> read a variable-length data structure. The result can overflow if the
> data structure changes between the two reads.
> 
> To fix this, prevent the double-fetch issue by copying virtio_hdr to a
> temporary variable.
> 
> Fixes: 4dc4e33ffa10 ("net/virtio: fix Rx checksum calculation")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>


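To spell out the hazard: the virtio-net header lives in guest-writable
memory, so a field read twice can return two different values. A
simplified sketch of the pattern (illustrative only; do_csum(), pkt and
pkt_len are stand-ins, not the actual vhost code):

	/* UNSAFE: guest can change csum_start between the two fetches */
	if (hdr->csum_start + hdr->csum_offset <= pkt_len)	/* fetch 1 */
		do_csum(pkt, hdr->csum_start);			/* fetch 2 */

	/* SAFE: snapshot once, then validate and use only the local copy */
	struct virtio_net_hdr local;

	memcpy(&local, hdr, sizeof(local));
	if (local.csum_start + local.csum_offset <= pkt_len)
		do_csum(pkt, local.csum_start);
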
How about something like the following, *untested*:

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 69901ab3b5..c65cb639b2 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2861,25 +2861,28 @@ vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
 	}
 }
 
-static __rte_noinline void
+static inline int
 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
-		struct buf_vector *buf_vec)
+			const struct buf_vector *buf_vec,
+			uint16_t nr_vec)
 {
-	uint64_t len;
-	uint64_t remain = sizeof(struct virtio_net_hdr);
-	uint64_t src;
-	uint64_t dst = (uint64_t)(uintptr_t)hdr;
+	size_t remain = sizeof(struct virtio_net_hdr);
+	uint8_t *dst = (uint8_t *)hdr;
 
-	while (remain) {
-		len = RTE_MIN(remain, buf_vec->buf_len);
-		src = buf_vec->buf_addr;
-		rte_memcpy((void *)(uintptr_t)dst,
-				(void *)(uintptr_t)src, len);
+	while (remain > 0) {
+		if (unlikely(nr_vec == 0))
+			return -1;
 
+		size_t len = RTE_MIN(remain, buf_vec->buf_len);
+		const void *src = (const void *)(uintptr_t)buf_vec->buf_addr;
+
+		memcpy(dst, src, len);
 		remain -= len;
 		dst += len;
 		buf_vec++;
+		--nr_vec;
 	}
+	return 0;
 }
 
 static __rte_always_inline int
@@ -2908,16 +2911,12 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 
 	if (virtio_net_with_host_offload(dev)) {
-		if (unlikely(buf_vec[0].buf_len < sizeof(struct virtio_net_hdr))) {
-			/*
-			 * No luck, the virtio-net header doesn't fit
-			 * in a contiguous virtual area.
-			 */
-			copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
-			hdr = &tmp_hdr;
-		} else {
-			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_vec[0].buf_addr);
-		}
+		if (unlikely(copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec, nr_vec) != 0))
+			return -1;
+
+		/* ensure the compiler does not delay or elide the copy */
+		rte_compiler_barrier();
+		hdr = &tmp_hdr;
 	}
 
 	for (vec_idx = 0; vec_idx < nr_vec; vec_idx++) {
@@ -3363,7 +3362,6 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 {
 	uint16_t avail_idx = vq->last_avail_idx;
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-	struct virtio_net_hdr *hdr;
 	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
@@ -3382,8 +3380,12 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 
 	if (virtio_net_with_host_offload(dev)) {
 		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-			hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
-			vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
+			struct virtio_net_hdr hdr;
+
+			memcpy(&hdr, (void *)desc_addrs[i], sizeof(struct virtio_net_hdr));
+			rte_compiler_barrier();
+
+			vhost_dequeue_offload(dev, &hdr, pkts[i], legacy_ol_flags);
 		}
 	}
 
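Two notes on the approach: the rte_compiler_barrier() after each copy
keeps the compiler from eliding the stack snapshot and re-reading
through the guest pointer after validation. And since
copy_vnet_hdr_from_desc() can now fail when the descriptor chain is too
short to hold the header, the caller treats a non-zero return as a
malformed request and drops the packet, as in the desc_to_mbuf() hunk
above:

	if (unlikely(copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec, nr_vec) != 0))
		return -1;	/* chain too short for the virtio-net header */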



Thread overview: 10+ messages
2024-12-19  6:38 [PATCH " Yunjian Wang
2024-12-19  8:24 ` David Marchand
2024-12-19 11:02   ` Wangyunjian(wangyunjian,TongTu)
2024-12-19 16:15 ` Stephen Hemminger
2024-12-20  2:17   ` Wangyunjian(wangyunjian,TongTu)
2024-12-20  4:59     ` Stephen Hemminger
2024-12-20  3:49 ` [PATCH v2 " Yunjian Wang
2024-12-20 17:10   ` Stephen Hemminger [this message]
2024-12-20 16:35 ` [PATCH " Stephen Hemminger
2024-12-23  2:45   ` Wangyunjian(wangyunjian,TongTu)
