From: Gongming Chen <chengongming1900@outlook.com>
To: maxime.coquelin@redhat.com, chenbox@nvidia.com,
chengongming1900@outlook.com
Cc: dev@dpdk.org, Gongming Chen <chengm11@chinatelecom.cn>, stable@dpdk.org
Subject: [PATCH v2] vhost: fix crash caused by accessing a freed vsocket
Date: Mon, 8 Jul 2024 10:17:45 +0800
Message-ID: <TYAP286MB0649F57394FD92719A1F99D8D8DA2@TYAP286MB0649.JPNP286.PROD.OUTLOOK.COM>
In-Reply-To: <TYAP286MB06494783C3973058BF499642D83D2@TYAP286MB0649.JPNP286.PROD.OUTLOOK.COM>
From: Gongming Chen <chengm11@chinatelecom.cn>
When handling of a vhost-user message fails in the event dispatch
thread, the vsocket's reconnection entry is added to the reconn_list
handled by the reconnection thread.

Since the reconnection, event dispatch and app configuration threads
share no common synchronization, the app configuration thread can free
the vsocket in rte_vhost_driver_unregister() while the vsocket's
reconnection entry still sits in reconn_list.

The reconnection thread then reconnects the vsocket and adds a new conn
to the event dispatch thread.

Finally, the event dispatch thread accesses the vsocket that
rte_vhost_driver_unregister() has already freed, resulting in a
use-after-free.

This patch adds a vhost thread read-write lock to serialize the
reconnection, event dispatch and app configuration threads. The
reconnection and event dispatch threads take the lock for reading while
they may dereference a vsocket, and rte_vhost_driver_unregister() takes
it for writing so it can free the vsocket safely.
#0 0x0000000000000025 in ?? ()
#1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:323
#2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:365
#3 0x0000000004168336 in ctrl_thread_init at lib/eal/common/eal_common_thread.c:282
#4 0x00007ffff7bc6ea5 in start_thread () from /lib64/libpthread.so.0
#5 0x00007ffff6209b0d in clone () from /lib64/libc.so.6
Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
Cc: stable@dpdk.org
Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
---
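Below is a minimal usage sketch of the intended locking discipline, for
illustration only (the helper names match the ones introduced by this
patch; loop bodies and error handling are elided):

    /* Event dispatch and reconnection threads: hold the read lock
     * while callbacks that may dereference a vsocket are running. */
    vhost_thread_read_lock();
    /* ... run read/write callbacks, e.g. vhost_user_read_cb() ... */
    vhost_thread_read_unlock();

    /* App configuration thread: take the write lock so that no
     * callback can run while the vsocket is removed and freed. */
    vhost_thread_write_lock();
    /* ... fdset_del() the socket/connection fds, then free the vsocket ... */
    vhost_thread_write_unlock();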
lib/vhost/fd_man.c | 3 +++
lib/vhost/meson.build | 1 +
lib/vhost/socket.c | 30 ++++++++++++------------------
lib/vhost/vhost_thread.c | 29 +++++++++++++++++++++++++++++
lib/vhost/vhost_thread.h | 12 ++++++++++++
5 files changed, 57 insertions(+), 18 deletions(-)
create mode 100644 lib/vhost/vhost_thread.c
create mode 100644 lib/vhost/vhost_thread.h
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 87a8dc3f3e..32472d5ed7 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -15,6 +15,7 @@
#include <rte_thread.h>
#include "fd_man.h"
+#include "vhost_thread.h"
RTE_LOG_REGISTER_SUFFIX(vhost_fdset_logtype, fdset, INFO);
#define RTE_LOGTYPE_VHOST_FDMAN vhost_fdset_logtype
@@ -342,6 +343,7 @@ fdset_event_dispatch(void *arg)
if (numfds < 0)
continue;
+ vhost_thread_read_lock();
for (i = 0; i < numfds; i++) {
pthread_mutex_lock(&pfdset->fd_mutex);
@@ -379,6 +381,7 @@ fdset_event_dispatch(void *arg)
if (remove1 || remove2)
fdset_del(pfdset, fd);
}
+ vhost_thread_read_unlock();
if (pfdset->destroy)
break;
diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 41b622a9be..7bc1840ed0 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -25,6 +25,7 @@ sources = files(
'vdpa.c',
'vhost.c',
'vhost_crypto.c',
+ 'vhost_thread.c',
'vhost_user.c',
'virtio_net.c',
'virtio_net_ctrl.c',
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index a75728a2e4..c9c295c2a3 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -20,6 +20,7 @@
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
+#include "vhost_thread.h"
#include "vhost_user.h"
@@ -456,6 +457,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
+ vhost_thread_read_lock();
pthread_mutex_lock(&reconn_list.mutex);
/*
@@ -487,6 +489,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
}
pthread_mutex_unlock(&reconn_list.mutex);
+ vhost_thread_read_unlock();
sleep(1);
}
@@ -1067,7 +1070,7 @@ rte_vhost_driver_unregister(const char *path)
if (path == NULL)
return -1;
-again:
+ vhost_thread_write_lock();
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
@@ -1079,14 +1082,10 @@ rte_vhost_driver_unregister(const char *path)
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
- * If r/wcb is executing, release vhost_user's
- * mutex lock, and try again since the r/wcb
- * may use the mutex lock.
+ * The vhost thread write lock is held, so no r/wcb can be
+ * executing; delete the fd from the fdset directly.
*/
- if (fdset_try_del(vhost_user.fdset, vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, vsocket->socket_fd);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
@@ -1098,17 +1097,10 @@ rte_vhost_driver_unregister(const char *path)
next = TAILQ_NEXT(conn, next);
/*
- * If r/wcb is executing, release vsocket's
- * conn_mutex and vhost_user's mutex locks, and
- * try again since the r/wcb may use the
- * conn_mutex and mutex locks.
+ * The vhost thread write lock is held, so no r/wcb can be
+ * executing; delete the fd from the fdset directly.
*/
- if (fdset_try_del(vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, conn->connfd);
VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
close(conn->connfd);
@@ -1130,9 +1122,11 @@ rte_vhost_driver_unregister(const char *path)
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return -1;
}
diff --git a/lib/vhost/vhost_thread.c b/lib/vhost/vhost_thread.c
new file mode 100644
index 0000000000..df439e68e6
--- /dev/null
+++ b/lib/vhost/vhost_thread.c
@@ -0,0 +1,29 @@
+#include <rte_rwlock.h>
+
+#include "vhost_thread.h"
+
+static rte_rwlock_t vhost_thread_lock = RTE_RWLOCK_INITIALIZER;
+
+void
+vhost_thread_read_lock(void)
+{
+ rte_rwlock_read_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_read_unlock(void)
+{
+ rte_rwlock_read_unlock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_lock(void)
+{
+ rte_rwlock_write_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_unlock(void)
+{
+ rte_rwlock_write_unlock(&vhost_thread_lock);
+}
diff --git a/lib/vhost/vhost_thread.h b/lib/vhost/vhost_thread.h
new file mode 100644
index 0000000000..3c44b1c030
--- /dev/null
+++ b/lib/vhost/vhost_thread.h
@@ -0,0 +1,12 @@
+#ifndef _VHOST_THREAD_H_
+#define _VHOST_THREAD_H_
+
+void vhost_thread_read_lock(void);
+
+void vhost_thread_read_unlock(void);
+
+void vhost_thread_write_lock(void);
+
+void vhost_thread_write_unlock(void);
+
+#endif
--
2.32.1 (Apple Git-133)