DPDK patches and discussions
From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org, Huawei Xie <huawei.xie@intel.com>,
	Yuanhan Liu <yuanhan.liu@linux.intel.com>,
	Rich Lane <rich.lane@bigswitch.com>
Cc: Dyasly Sergey <s.dyasly@samsung.com>,
	Heetae Ahn <heetae82.ahn@samsung.com>,
	Jianfeng Tan <jianfeng.tan@intel.com>,
	 Stephen Hemminger <stephen@networkplumber.org>,
	Thomas Monjalon <thomas.monjalon@6wind.com>,
	Ilya Maximets <i.maximets@samsung.com>
Subject: [dpdk-dev] [PATCH v2] vhost: fix segfault on bad descriptor address
Date: Thu, 14 Jul 2016 11:18:39 +0300	[thread overview]
Message-ID: <1468484319-26906-1-git-send-email-i.maximets@samsung.com> (raw)
In-Reply-To: <1463748604-27251-1-git-send-email-i.maximets@samsung.com>

In the current implementation, vhost crashes with a segmentation fault
if a malicious or buggy virtio application corrupts the descriptor
addresses.

Before commit 0823c1cb0a73 this crash was reproducible even with a
normal DPDK application that tried to change the number of virtqueues
dynamically inside the VM.

Fix this by validating descriptor addresses before using them.
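
For illustration only (not part of the patch), a minimal sketch of the
check pattern applied below. The helper name is hypothetical, the vhost
types and macros are assumed to be in scope as in lib/librte_vhost, and
gpa_to_vva() is assumed to return 0 for a guest physical address outside
the registered memory regions:

/* Hypothetical helper: translate the guest address first and refuse
 * to touch the descriptor if the translation failed, instead of
 * dereferencing the result later and crashing. */
static inline uint64_t
map_and_check_desc_addr(struct virtio_net *dev, struct vring_desc *desc)
{
	uint64_t desc_addr = gpa_to_vva(dev, desc->addr);

	if (unlikely(desc_addr == 0))
		return 0;	/* caller must treat this as a bad descriptor */

	rte_prefetch0((void *)(uintptr_t)desc_addr);
	return desc_addr;
}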

Also fix the error return value of 'copy_mbuf_to_desc_mergeable()'
from '-1' to '0', because the function returns an unsigned value that
represents the number of used descriptors.
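
For context, a standalone illustration (not DPDK code) of why '0' is
the right error value: '-1' returned through an unsigned return type
silently converts to a huge count, which the caller would then add to
the used index as if that many descriptors had been filled.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an enqueue helper with an unsigned return type. */
static uint32_t enqueue_failed(void)
{
	return -1;	/* converts to 4294967295, not an error marker */
}

int main(void)
{
	uint32_t nr_used = enqueue_failed();

	/* Prints 4294967295: a bogus "number of used descriptors". */
	printf("descriptors reported as used: %" PRIu32 "\n", nr_used);
	return 0;
}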

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
Version 2:
	* Rebased on top of current master.
	* The host address is now also checked in the mergeable case,
	  because the required refactoring is already done.
	* Commit message updated, because the old issue with
	  virtio reload was accidentally fixed by commit
	  0823c1cb0a73.

 lib/librte_vhost/vhost_rxtx.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 15ca956..31e8b58 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -147,10 +147,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 
 	desc = &vq->desc[desc_idx];
-	if (unlikely(desc->len < dev->vhost_hlen))
+	desc_addr = gpa_to_vva(dev, desc->addr);
+	if (unlikely(desc->len < dev->vhost_hlen || !desc_addr))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, desc->addr);
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 	virtio_enqueue_offload(m, &virtio_hdr.hdr);
@@ -182,7 +182,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				return -1;
 
 			desc = &vq->desc[desc->next];
-			desc_addr   = gpa_to_vva(dev, desc->addr);
+			desc_addr = gpa_to_vva(dev, desc->addr);
+			if (unlikely(!desc_addr))
+				return -1;
+
 			desc_offset = 0;
 			desc_avail  = desc->len;
 		}
@@ -387,10 +390,10 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 		dev->vid, cur_idx, end_idx);
 
-	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen)
-		return -1;
-
 	desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
+		return 0;
+
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 	virtio_hdr.num_buffers = end_idx - start_idx;
@@ -425,6 +428,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 			vec_idx++;
 			desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+			if (unlikely(!desc_addr))
+				return 0;
 
 			/* Prefetch buffer address. */
 			rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -507,7 +512,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		*(volatile uint16_t *)&vq->used->idx += nr_used;
 		vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
 			sizeof(vq->used->idx));
-		vq->last_used_idx = end;
+		vq->last_used_idx += nr_used;
 	}
 
 	if (likely(pkt_idx)) {
@@ -688,6 +693,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		return -1;
 
 	desc_addr = gpa_to_vva(dev, desc->addr);
+	if (unlikely(!desc_addr))
+		return -1;
+
 	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
 	rte_prefetch0(hdr);
 
@@ -701,6 +709,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		desc = &vq->desc[desc->next];
 
 		desc_addr = gpa_to_vva(dev, desc->addr);
+		if (unlikely(!desc_addr))
+			return -1;
+
 		rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 		desc_offset = 0;
@@ -737,6 +748,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			desc = &vq->desc[desc->next];
 
 			desc_addr = gpa_to_vva(dev, desc->addr);
+			if (unlikely(!desc_addr))
+				return -1;
+
 			rte_prefetch0((void *)(uintptr_t)desc_addr);
 
 			desc_offset = 0;
-- 
2.7.4


Thread overview: 39+ messages
2016-05-20 12:50 [dpdk-dev] [PATCH] " Ilya Maximets
2016-05-23 10:57 ` Yuanhan Liu
2016-05-23 11:04   ` Ilya Maximets
2016-05-30 11:05     ` Ilya Maximets
2016-05-30 14:25       ` Yuanhan Liu
2016-05-31  9:12         ` Ilya Maximets
2016-05-30 12:00 ` Tan, Jianfeng
2016-05-30 12:24   ` Ilya Maximets
2016-05-31  6:53     ` Tan, Jianfeng
2016-05-31  9:10       ` Ilya Maximets
2016-05-31 22:06 ` Rich Lane
2016-06-02 10:46   ` Ilya Maximets
2016-06-02 16:22     ` Rich Lane
2016-06-03  6:01       ` Ilya Maximets
2016-07-01  7:35 ` Yuanhan Liu
2016-07-06 11:19   ` Ilya Maximets
2016-07-06 12:24     ` Yuanhan Liu
2016-07-08 11:48       ` Ilya Maximets
2016-07-10 13:17         ` Yuanhan Liu
2016-07-11  8:38           ` Yuanhan Liu
2016-07-11  9:50             ` Ilya Maximets
2016-07-11 11:05               ` Yuanhan Liu
2016-07-11 11:47                 ` Ilya Maximets
2016-07-12  2:43                   ` Yuanhan Liu
2016-07-12  5:53                     ` Ilya Maximets
2016-07-13  7:34                       ` Ilya Maximets
2016-07-13  8:47                         ` Yuanhan Liu
2016-07-13 15:54                           ` Rich Lane
2016-07-14  1:42                             ` Yuanhan Liu
2016-07-14  4:38                               ` Ilya Maximets
2016-07-14  8:18 ` Ilya Maximets [this message]
2016-07-15  6:17   ` [dpdk-dev] [PATCH v2] " Yuanhan Liu
2016-07-15  7:23     ` Ilya Maximets
2016-07-15  8:40       ` Yuanhan Liu
2016-07-15 11:15 ` [dpdk-dev] [PATCH v3 0/2] " Ilya Maximets
2016-07-15 11:15   ` [dpdk-dev] [PATCH v3 1/2] vhost: fix using of bad return value on mergeable enqueue Ilya Maximets
2016-07-15 11:15   ` [dpdk-dev] [PATCH v3 2/2] vhost: do sanity check for ring descriptor address Ilya Maximets
2016-07-15 12:14   ` [dpdk-dev] [PATCH v3 0/2] vhost: fix segfault on bad " Yuanhan Liu
2016-07-15 19:37     ` Thomas Monjalon
