From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	Tiwei Bie <tiwei.bie@intel.com>,
	Zhihong Wang <zhihong.wang@intel.com>,
	Ilya Maximets <i.maximets@samsung.com>,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH v2] vhost-user: drop connection on message handling failures
Date: Mon,  3 Sep 2018 13:12:24 +0300
Message-ID: <20180903101059eucas1p2030d1e9077a03bc5d0613ba0311e1eab~Q3BXc1AtS3196731967eucas1p2z@eucas1p2.samsung.com>
In-Reply-To: <20180817113354.16140-1-i.maximets@samsung.com>

There are many cases where vhost-user message handling can fail
and leave the device in an unrecoverable state. For example,
allocation failures of the shadow used ring and the batched copy
array are not recoverable and lead to segmentation faults like
this one on the receive/transmit path:

  Program received signal SIGSEGV, Segmentation fault.
  [Switching to Thread 0x7f913fecf0 (LWP 43625)]
  in copy_desc_to_mbuf () at /lib/librte_vhost/virtio_net.c:760
  760       batch_copy[vq->batch_copy_nb_elems].dst =

This is easy to reproduce under memory pressure or with a large
number of vhost-user ports.

Fix this by propagating errors to the upper layer, which drops
the connection whenever the failure cannot be reported back to
the message sender.

Fixes: f689586bc060 ("vhost: shadow used ring update")
Cc: stable@dpdk.org

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---

v2:
  * Patch reworked to cover most of the possible failures at once. [Tiwei Bie]

 lib/librte_vhost/vhost_user.c | 51 +++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index a2d4c9ffc..7997b890f 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1012,7 +1012,7 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	vq->callfd = file.fd;
 }
 
-static void
+static int
 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
 {
 	struct vhost_vring_file file;
@@ -1030,7 +1030,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
 	/* Interpret ring addresses only when ring is started. */
 	dev = translate_ring_addresses(dev, file.index);
 	if (!dev)
-		return;
+		return -1;
 
 	*pdev = dev;
 
@@ -1047,6 +1047,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
 	if (vq->kickfd >= 0)
 		close(vq->kickfd);
 	vq->kickfd = file.fd;
+	return 0;
 }
 
 static void
@@ -1170,14 +1171,19 @@ vhost_user_get_protocol_features(struct virtio_net *dev,
 	msg->size = sizeof(msg->payload.u64);
 }
 
-static void
+static int
 vhost_user_set_protocol_features(struct virtio_net *dev,
 				 uint64_t protocol_features)
 {
-	if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
-		return;
+	if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%d) received invalid protocol features.\n",
+			dev->vid);
+		return -1;
+	}
 
 	dev->protocol_features = protocol_features;
+	return 0;
 }
 
 static int
@@ -1655,8 +1661,6 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 	case VHOST_USER_SET_FEATURES:
 		ret = vhost_user_set_features(dev, msg.payload.u64);
-		if (ret)
-			return -1;
 		break;
 
 	case VHOST_USER_GET_PROTOCOL_FEATURES:
@@ -1664,14 +1668,14 @@ vhost_user_msg_handler(int vid, int fd)
 		send_vhost_reply(fd, &msg);
 		break;
 	case VHOST_USER_SET_PROTOCOL_FEATURES:
-		vhost_user_set_protocol_features(dev, msg.payload.u64);
+		ret = vhost_user_set_protocol_features(dev, msg.payload.u64);
 		break;
 
 	case VHOST_USER_SET_OWNER:
-		vhost_user_set_owner();
+		ret = vhost_user_set_owner();
 		break;
 	case VHOST_USER_RESET_OWNER:
-		vhost_user_reset_owner(dev);
+		ret = vhost_user_reset_owner(dev);
 		break;
 
 	case VHOST_USER_SET_MEM_TABLE:
@@ -1679,8 +1683,9 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_LOG_BASE:
-		vhost_user_set_log_base(dev, &msg);
-
+		ret = vhost_user_set_log_base(dev, &msg);
+		if (ret)
+			goto skip_to_reply;
 		/* it needs a reply */
 		msg.size = sizeof(msg.payload.u64);
 		send_vhost_reply(fd, &msg);
@@ -1691,23 +1696,25 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_VRING_NUM:
-		vhost_user_set_vring_num(dev, &msg);
+		ret = vhost_user_set_vring_num(dev, &msg);
 		break;
 	case VHOST_USER_SET_VRING_ADDR:
-		vhost_user_set_vring_addr(&dev, &msg);
+		ret = vhost_user_set_vring_addr(&dev, &msg);
 		break;
 	case VHOST_USER_SET_VRING_BASE:
-		vhost_user_set_vring_base(dev, &msg);
+		ret = vhost_user_set_vring_base(dev, &msg);
 		break;
 
 	case VHOST_USER_GET_VRING_BASE:
-		vhost_user_get_vring_base(dev, &msg);
+		ret = vhost_user_get_vring_base(dev, &msg);
+		if (ret)
+			goto skip_to_reply;
 		msg.size = sizeof(msg.payload.state);
 		send_vhost_reply(fd, &msg);
 		break;
 
 	case VHOST_USER_SET_VRING_KICK:
-		vhost_user_set_vring_kick(&dev, &msg);
+		ret = vhost_user_set_vring_kick(&dev, &msg);
 		break;
 	case VHOST_USER_SET_VRING_CALL:
 		vhost_user_set_vring_call(dev, &msg);
@@ -1726,10 +1733,10 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_VRING_ENABLE:
-		vhost_user_set_vring_enable(dev, &msg);
+		ret = vhost_user_set_vring_enable(dev, &msg);
 		break;
 	case VHOST_USER_SEND_RARP:
-		vhost_user_send_rarp(dev, &msg);
+		ret = vhost_user_send_rarp(dev, &msg);
 		break;
 
 	case VHOST_USER_NET_SET_MTU:
@@ -1750,7 +1757,7 @@ vhost_user_msg_handler(int vid, int fd)
 	}
 
 skip_to_post_handle:
-	if (dev->extern_ops.post_msg_handle) {
+	if (!ret && dev->extern_ops.post_msg_handle) {
 		uint32_t need_reply;
 
 		ret = (*dev->extern_ops.post_msg_handle)(
@@ -1770,6 +1777,10 @@ vhost_user_msg_handler(int vid, int fd)
 		msg.payload.u64 = !!ret;
 		msg.size = sizeof(msg.payload.u64);
 		send_vhost_reply(fd, &msg);
+	} else if (ret) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"vhost message handling failed.\n");
+		return -1;
 	}
 
 	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
-- 
2.17.1
