* [PATCH v1] vhost: fix crash caused by accessing a freed vsocket
@ 2024-04-03 16:05 Gongming Chen
2024-05-10 7:28 ` Gongming Chen
` (3 more replies)
0 siblings, 4 replies; 8+ messages in thread
From: Gongming Chen @ 2024-04-03 16:05 UTC (permalink / raw)
To: maxime.coquelin, chenbox, chengongming1900; +Cc: dev, Gongming Chen, stable
From: Gongming Chen <chengm11@chinatelecom.cn>
When vhost-user message handling fails in the event dispatch thread,
the vsocket's reconnection entry is added to the reconn_list of the
reconnection thread.
Since the reconnection, event dispatch and app configuration threads
share no common synchronization, the app config thread can free the
vsocket in rte_vhost_driver_unregister() while the vsocket's
reconnection entry still sits in reconn_list.
The reconnection thread then connects the vsocket again and adds a new
conn to the dispatch thread.
Finally, the freed vsocket is accessed again in the event dispatch
thread, resulting in a use-after-free.
This patch adds a vhost thread read-write lock shared by the
reconnection, event dispatch and app configuration threads. When the
vhost driver unregisters, it holds the lock exclusively so the vsocket
can be freed safely.
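In outline, the locking discipline looks as follows (an illustrative
sketch against the vhost_thread.h API added below; the reader_side()
and writer_side() helpers are invented for illustration, not patch
code):

#include "vhost_thread.h"

/* The event dispatch and reconnect threads are the readers: any use
 * of a vsocket happens under the read lock. */
static void reader_side(void)
{
	vhost_thread_read_lock();
	/* ... dispatch events / retry connections, may touch a vsocket ... */
	vhost_thread_read_unlock();
}

/* rte_vhost_driver_unregister() is the writer: taking the write lock
 * waits until both reader threads are outside their critical
 * sections, after which the vsocket can be freed safely. */
static void writer_side(void)
{
	vhost_thread_write_lock();
	/* ... remove fds and the reconnect entry, then free the vsocket ... */
	vhost_thread_write_unlock();
}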
#0 0x0000000000000025 in ?? ()
#1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:330
#2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:283
Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
Cc: stable@dpdk.org
Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
---
lib/vhost/fd_man.c | 3 +++
lib/vhost/meson.build | 1 +
lib/vhost/socket.c | 30 ++++++++++++------------------
lib/vhost/vhost_thread.c | 37 +++++++++++++++++++++++++++++++++++++
lib/vhost/vhost_thread.h | 16 ++++++++++++++++
5 files changed, 69 insertions(+), 18 deletions(-)
create mode 100644 lib/vhost/vhost_thread.c
create mode 100644 lib/vhost/vhost_thread.h
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 481e6b900a..b0e0aa2640 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -9,6 +9,7 @@
#include <rte_log.h>
#include "fd_man.h"
+#include "vhost_thread.h"
RTE_LOG_REGISTER_SUFFIX(vhost_fdset_logtype, fdset, INFO);
#define RTE_LOGTYPE_VHOST_FDMAN vhost_fdset_logtype
@@ -250,6 +251,7 @@ fdset_event_dispatch(void *arg)
if (val < 0)
continue;
+ vhost_thread_read_lock();
need_shrink = 0;
for (i = 0; i < numfds; i++) {
pthread_mutex_lock(&pfdset->fd_mutex);
@@ -303,6 +305,7 @@ fdset_event_dispatch(void *arg)
if (need_shrink)
fdset_shrink(pfdset);
+ vhost_thread_read_unlock();
}
return 0;
diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 41b622a9be..7bc1840ed0 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -25,6 +25,7 @@ sources = files(
'vdpa.c',
'vhost.c',
'vhost_crypto.c',
+ 'vhost_thread.c',
'vhost_user.c',
'virtio_net.c',
'virtio_net_ctrl.c',
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 96b3ab5595..e05d36f549 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -20,6 +20,7 @@
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
+#include "vhost_thread.h"
#include "vhost_user.h"
@@ -463,6 +464,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
+ vhost_thread_read_lock();
pthread_mutex_lock(&reconn_list.mutex);
/*
@@ -494,6 +496,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
}
pthread_mutex_unlock(&reconn_list.mutex);
+ vhost_thread_read_unlock();
sleep(1);
}
@@ -1071,7 +1074,7 @@ rte_vhost_driver_unregister(const char *path)
if (path == NULL)
return -1;
-again:
+ vhost_thread_write_lock();
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
@@ -1083,14 +1086,10 @@ rte_vhost_driver_unregister(const char *path)
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
- * If r/wcb is executing, release vhost_user's
- * mutex lock, and try again since the r/wcb
- * may use the mutex lock.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(&vhost_user.fdset, vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(&vhost_user.fdset, vsocket->socket_fd);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
@@ -1102,17 +1101,10 @@ rte_vhost_driver_unregister(const char *path)
next = TAILQ_NEXT(conn, next);
/*
- * If r/wcb is executing, release vsocket's
- * conn_mutex and vhost_user's mutex locks, and
- * try again since the r/wcb may use the
- * conn_mutex and mutex locks.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(&vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(&vhost_user.fdset, conn->connfd);
VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
close(conn->connfd);
@@ -1134,9 +1126,11 @@ rte_vhost_driver_unregister(const char *path)
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return -1;
}
diff --git a/lib/vhost/vhost_thread.c b/lib/vhost/vhost_thread.c
new file mode 100644
index 0000000000..6b5dc22042
--- /dev/null
+++ b/lib/vhost/vhost_thread.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 China Telecom Cloud Technology Co., Ltd
+ */
+
+#include <rte_rwlock.h>
+
+#include "vhost_thread.h"
+
+static rte_rwlock_t vhost_thread_lock = RTE_RWLOCK_INITIALIZER;
+
+void
+vhost_thread_read_lock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_read_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_read_unlock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_read_unlock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_lock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_write_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_unlock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_write_unlock(&vhost_thread_lock);
+}
diff --git a/lib/vhost/vhost_thread.h b/lib/vhost/vhost_thread.h
new file mode 100644
index 0000000000..61679172af
--- /dev/null
+++ b/lib/vhost/vhost_thread.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 China Telecom Cloud Technology Co., Ltd
+ */
+
+#ifndef _VHOST_THREAD_H_
+#define _VHOST_THREAD_H_
+
+void vhost_thread_read_lock(void);
+
+void vhost_thread_read_unlock(void);
+
+void vhost_thread_write_lock(void);
+
+void vhost_thread_write_unlock(void);
+
+#endif
--
2.44.0
* Re: [PATCH v1] vhost: fix crash caused by accessing a freed vsocket
2024-04-03 16:05 [PATCH v1] vhost: fix crash caused by accessing a freed vsocket Gongming Chen
@ 2024-05-10 7:28 ` Gongming Chen
2024-07-02 7:48 ` Maxime Coquelin
2024-07-08 2:17 ` [PATCH v2] " Gongming Chen
` (2 subsequent siblings)
3 siblings, 1 reply; 8+ messages in thread
From: Gongming Chen @ 2024-05-10 7:28 UTC (permalink / raw)
To: Maxime Coquelin, chenbox, chengongming1900; +Cc: dev, Gongming Chen, stable
Hi Maxime and Chenbo,
Do you have any suggestions for how to address this?
Looking forward to hearing from you!
Thanks,
Gongming
> On Apr 3, 2024, at 11:52 PM, Gongming Chen <chengongming1900@outlook.com> wrote:
>
> Hi Maxime,
> Thanks for the review.
>
>> On Apr 3, 2024, at 5:39 PM, Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>>
>> Hi Gongming,
>>
>> It's the 9th time the patch has been sent.
>> I'm not sure whether there are changes between them or these are just
>> re-sends, but that's something to avoid.
>>
>
> Sorry, there's something wrong with my mailbox.
> I will send a v1 as the latest patch; the content is actually the same.
>
>> If there are differences, you should use versioning to highlight them.
>> If unsure, please check the contribution guidelines first.
>>
>> Regarding the patch itself, I don't know if this is avoidable, but I
>> would prefer we do not introduce yet another lock in there.
>>
>> Thanks,
>> Maxime
>>
>
> I totally agree with you.
> Initially I hoped to solve this problem without introducing a new
> lock, but the result was not as expected.
>
> 1. The vsocket is shared between the event and reconnect threads by
> passing the vsocket pointer, so there is no way to protect the
> vsocket with a simple per-vsocket lock.
>
> 2. The event and reconnect threads can pass vsocket pointers to each
> other, so locking the two threads separately cannot ensure that the
> vsocket is no longer accessed.
>
> 3. Therefore, with respect to the vsocket resource, the event and
> reconnect threads share one critical section. Only by locking both
> threads at the same time can we ensure that the vsocket is no longer
> accessed and can be freed safely.
>
> Currently, the app config, event, and reconnect threads each have a
> lock for the resources they maintain: vhost_user.mutex,
> pfdset->fd_mutex, and reconn_list.mutex respectively.
>
> What is missing is a thread-level lock protecting the critical
> sections between threads, much like RCU grace-period protection.
>
> Once the app config thread acquires the write lock, the event and
> reconnect threads are guaranteed to be outside their critical
> sections, so it can completely clean up the resources associated with
> the vsocket and free it safely.
>
> Considering future expansion, if more resources like the vsocket
> appear, this thread lock can likewise ensure they are released only
> after complete cleanup.
>
> In this way, the threading model becomes clearer, and the complicated
> try-lock method is no longer needed.
>
> Thanks,
> Gongming
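To make the pattern concrete, below is a minimal standalone sketch of
the thread-level read-write lock idea, with plain pthreads standing in
for rte_rwlock; it is illustrative only, not the patch code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static pthread_rwlock_t thread_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *vsocket;            /* stands in for a vhost_user_socket */
static atomic_int running = 1;

/* Models the event dispatch and reconnect threads: every access to
 * the shared vsocket happens inside a read-side critical section. */
static void *reader(void *arg)
{
	(void)arg;
	while (atomic_load(&running)) {
		pthread_rwlock_rdlock(&thread_lock);
		if (vsocket != NULL)
			(*vsocket)++;   /* any use of the vsocket */
		pthread_rwlock_unlock(&thread_lock);
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	vsocket = calloc(1, sizeof(*vsocket));
	pthread_create(&t1, NULL, reader, NULL);
	pthread_create(&t2, NULL, reader, NULL);
	usleep(10000);

	/* Models rte_vhost_driver_unregister(): the write lock is
	 * granted only when no reader is inside its critical section,
	 * so the free below cannot race with a use. */
	pthread_rwlock_wrlock(&thread_lock);
	free(vsocket);
	vsocket = NULL;
	pthread_rwlock_unlock(&thread_lock);

	atomic_store(&running, 0);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("vsocket freed with no reader in flight");
	return 0;
}

The key property is the same one described above: the write lock is
granted only once no reader holds the lock, so the free cannot overlap
any read-side critical section.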
* Re: [PATCH v1] vhost: fix crash caused by accessing a freed vsocket
2024-05-10 7:28 ` Gongming Chen
@ 2024-07-02 7:48 ` Maxime Coquelin
2024-07-08 23:50 ` Gongming Chen
0 siblings, 1 reply; 8+ messages in thread
From: Maxime Coquelin @ 2024-07-02 7:48 UTC (permalink / raw)
To: Gongming Chen, chenbox; +Cc: dev, Gongming Chen, stable
Hi Gongming,
On 5/10/24 09:28, Gongming Chen wrote:
> Hi Maxime and Chenbo,
>
> Do you have any suggestions for how to address this?
>
> Looking forward to hearing from you!
Could you please have a try with the latest DPDK main branch,
and if it reproduces, rebase your series on top of it.
I don't think it has been fixed, but we've made significant changes to
fdman in this release, so we need a rebase anyway.
Thanks in advance,
Maxime
>
> Thanks,
> Gongming
>
>> On Apr 3, 2024, at 11:52 PM, Gongming Chen <chengongming1900@outlook.com> wrote:
>>
>> Hi Maxime,
>> Thanks for the review.
>>
>>> On Apr 3, 2024, at 5:39 PM, Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>>>
>>> Hi Gongming,
>>>
>>> It's the 9th time the patch has been sent.
>>> I'm not sure whether there are changes between them or these are just
>>> re-sends, but that's something to avoid.
>>>
>>
>> Sorry, there's something wrong with my mailbox.
>> I will send a v1 as the latest patch; the content is actually the same.
>>
>>> If there are differences, you should use versioning to highlight them.
>>> If unsure, please check the contribution guidelines first.
>>>
>>> Regarding the patch itself, I don't know if this is avoidable, but I
>>> would prefer we do not introduce yet another lock in there.
>>>
>>> Thanks,
>>> Maxime
>>>
>>
>> I totally agree with you.
>> Initially I hoped to solve this problem without introducing a new
>> lock, but the result was not as expected.
>>
>> 1. The vsocket is shared between the event and reconnect threads by
>> passing the vsocket pointer, so there is no way to protect the
>> vsocket with a simple per-vsocket lock.
>>
>> 2. The event and reconnect threads can pass vsocket pointers to each
>> other, so locking the two threads separately cannot ensure that the
>> vsocket is no longer accessed.
>>
>> 3. Therefore, with respect to the vsocket resource, the event and
>> reconnect threads share one critical section. Only by locking both
>> threads at the same time can we ensure that the vsocket is no longer
>> accessed and can be freed safely.
>>
>> Currently, the app config, event, and reconnect threads each have a
>> lock for the resources they maintain: vhost_user.mutex,
>> pfdset->fd_mutex, and reconn_list.mutex respectively.
>>
>> What is missing is a thread-level lock protecting the critical
>> sections between threads, much like RCU grace-period protection.
>>
>> Once the app config thread acquires the write lock, the event and
>> reconnect threads are guaranteed to be outside their critical
>> sections, so it can completely clean up the resources associated with
>> the vsocket and free it safely.
>>
>> Considering future expansion, if more resources like the vsocket
>> appear, this thread lock can likewise ensure they are released only
>> after complete cleanup.
>>
>> In this way, the threading model becomes clearer, and the complicated
>> try-lock method is no longer needed.
>>
>> Thanks,
>> Gongming
>
* [PATCH v2] vhost: fix crash caused by accessing a freed vsocket
2024-04-03 16:05 [PATCH v1] vhost: fix crash caused by accessing a freed vsocket Gongming Chen
2024-05-10 7:28 ` Gongming Chen
@ 2024-07-08 2:17 ` Gongming Chen
2024-07-08 2:23 ` [PATCH v3] " Gongming Chen
2024-07-08 4:41 ` [PATCH v4] " Gongming Chen
3 siblings, 0 replies; 8+ messages in thread
From: Gongming Chen @ 2024-07-08 2:17 UTC (permalink / raw)
To: maxime.coquelin, chenbox, chengongming1900; +Cc: dev, Gongming Chen, stable
From: Gongming Chen <chengm11@chinatelecom.cn>
When vhost-user message handling fails in the event dispatch thread,
the vsocket's reconnection entry is added to the reconn_list of the
reconnection thread.
Since the reconnection, event dispatch and app configuration threads
share no common synchronization, the app config thread can free the
vsocket in rte_vhost_driver_unregister() while the vsocket's
reconnection entry still sits in reconn_list.
The reconnection thread then connects the vsocket again and adds a new
conn to the dispatch thread.
Finally, the vsocket already freed by rte_vhost_driver_unregister() is
accessed again in the event dispatch thread, resulting in a
use-after-free.
This patch adds a vhost thread read-write lock shared by the
reconnection, event dispatch and app configuration threads. When the
vhost driver unregisters, it holds the lock exclusively so the vsocket
can be freed safely.
#0 0x0000000000000025 in ?? ()
#1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:323
#2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:365
#3 0x0000000004168336 in ctrl_thread_init at lib/eal/common/eal_common_thread.c:282
#4 0x00007ffff7bc6ea5 in start_thread () from /lib64/libpthread.so.0
#5 0x00007ffff6209b0d in clone () from /lib64/libc.so.6
Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
Cc: stable@dpdk.org
Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
---
lib/vhost/fd_man.c | 3 +++
lib/vhost/meson.build | 1 +
lib/vhost/socket.c | 30 ++++++++++++------------------
lib/vhost/vhost_thread.c | 29 +++++++++++++++++++++++++++++
lib/vhost/vhost_thread.h | 12 ++++++++++++
5 files changed, 57 insertions(+), 18 deletions(-)
create mode 100644 lib/vhost/vhost_thread.c
create mode 100644 lib/vhost/vhost_thread.h
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 87a8dc3f3e..32472d5ed7 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -15,6 +15,7 @@
#include <rte_thread.h>
#include "fd_man.h"
+#include "vhost_thread.h"
RTE_LOG_REGISTER_SUFFIX(vhost_fdset_logtype, fdset, INFO);
#define RTE_LOGTYPE_VHOST_FDMAN vhost_fdset_logtype
@@ -342,6 +343,7 @@ fdset_event_dispatch(void *arg)
if (numfds < 0)
continue;
+ vhost_thread_read_lock();
for (i = 0; i < numfds; i++) {
pthread_mutex_lock(&pfdset->fd_mutex);
@@ -379,6 +381,7 @@ fdset_event_dispatch(void *arg)
if (remove1 || remove2)
fdset_del(pfdset, fd);
}
+ vhost_thread_read_unlock();
if (pfdset->destroy)
break;
diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 41b622a9be..7bc1840ed0 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -25,6 +25,7 @@ sources = files(
'vdpa.c',
'vhost.c',
'vhost_crypto.c',
+ 'vhost_thread.c',
'vhost_user.c',
'virtio_net.c',
'virtio_net_ctrl.c',
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index a75728a2e4..c9c295c2a3 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -20,6 +20,7 @@
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
+#include "vhost_thread.h"
#include "vhost_user.h"
@@ -456,6 +457,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
+ vhost_thread_read_lock();
pthread_mutex_lock(&reconn_list.mutex);
/*
@@ -487,6 +489,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
}
pthread_mutex_unlock(&reconn_list.mutex);
+ vhost_thread_read_unlock();
sleep(1);
}
@@ -1067,7 +1070,7 @@ rte_vhost_driver_unregister(const char *path)
if (path == NULL)
return -1;
-again:
+ vhost_thread_write_lock();
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
@@ -1079,14 +1082,10 @@ rte_vhost_driver_unregister(const char *path)
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
- * If r/wcb is executing, release vhost_user's
- * mutex lock, and try again since the r/wcb
- * may use the mutex lock.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset, vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, vsocket->socket_fd);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
@@ -1098,17 +1097,10 @@ rte_vhost_driver_unregister(const char *path)
next = TAILQ_NEXT(conn, next);
/*
- * If r/wcb is executing, release vsocket's
- * conn_mutex and vhost_user's mutex locks, and
- * try again since the r/wcb may use the
- * conn_mutex and mutex locks.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, conn->connfd);
VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
close(conn->connfd);
@@ -1130,9 +1122,11 @@ rte_vhost_driver_unregister(const char *path)
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return -1;
}
diff --git a/lib/vhost/vhost_thread.c b/lib/vhost/vhost_thread.c
new file mode 100644
index 0000000000..df439e68e6
--- /dev/null
+++ b/lib/vhost/vhost_thread.c
@@ -0,0 +1,29 @@
+#include <rte_rwlock.h>
+
+#include "vhost_thread.h"
+
+static rte_rwlock_t vhost_thread_lock = RTE_RWLOCK_INITIALIZER;
+
+void
+vhost_thread_read_lock(void)
+{
+ rte_rwlock_read_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_read_unlock(void)
+{
+ rte_rwlock_read_unlock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_lock(void)
+{
+ rte_rwlock_write_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_unlock(void)
+{
+ rte_rwlock_write_unlock(&vhost_thread_lock);
+}
diff --git a/lib/vhost/vhost_thread.h b/lib/vhost/vhost_thread.h
new file mode 100644
index 0000000000..3c44b1c030
--- /dev/null
+++ b/lib/vhost/vhost_thread.h
@@ -0,0 +1,12 @@
+#ifndef _VHOST_THREAD_H_
+#define _VHOST_THREAD_H_
+
+void vhost_thread_read_lock(void);
+
+void vhost_thread_read_unlock(void);
+
+void vhost_thread_write_lock(void);
+
+void vhost_thread_write_unlock(void);
+
+#endif
--
2.32.1 (Apple Git-133)
* [PATCH v3] vhost: fix crash caused by accessing a freed vsocket
2024-04-03 16:05 [PATCH v1] vhost: fix crash caused by accessing a freed vsocket Gongming Chen
2024-05-10 7:28 ` Gongming Chen
2024-07-08 2:17 ` [PATCH v2] " Gongming Chen
@ 2024-07-08 2:23 ` Gongming Chen
2024-07-08 4:41 ` [PATCH v4] " Gongming Chen
3 siblings, 0 replies; 8+ messages in thread
From: Gongming Chen @ 2024-07-08 2:23 UTC (permalink / raw)
To: maxime.coquelin, chenbox, chengongming1900; +Cc: dev, Gongming Chen, stable
From: Gongming Chen <chengm11@chinatelecom.cn>
When vhost-user message handling fails in the event dispatch thread,
the vsocket's reconnection entry is added to the reconn_list of the
reconnection thread.
Since the reconnection, event dispatch and app configuration threads
share no common synchronization, the app config thread can free the
vsocket in rte_vhost_driver_unregister() while the vsocket's
reconnection entry still sits in reconn_list.
The reconnection thread then connects the vsocket again and adds a new
conn to the dispatch thread.
Finally, the vsocket already freed by rte_vhost_driver_unregister() is
accessed again in the event dispatch thread, resulting in a
use-after-free.
This patch adds a vhost thread read-write lock shared by the
reconnection, event dispatch and app configuration threads. When the
vhost driver unregisters, it holds the lock exclusively so the vsocket
can be freed safely.
#0 0x0000000000000025 in ?? ()
#1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:323
#2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:365
#3 0x0000000004168336 in ctrl_thread_init at lib/eal/common/eal_common_thread.c:282
#4 0x00007ffff7bc6ea5 in start_thread () from /lib64/libpthread.so.0
#5 0x00007ffff6209b0d in clone () from /lib64/libc.so.6
Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
Cc: stable@dpdk.org
Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
---
lib/vhost/fd_man.c | 3 +++
lib/vhost/meson.build | 1 +
lib/vhost/socket.c | 30 ++++++++++++------------------
lib/vhost/vhost_thread.c | 29 +++++++++++++++++++++++++++++
lib/vhost/vhost_thread.h | 12 ++++++++++++
5 files changed, 57 insertions(+), 18 deletions(-)
create mode 100644 lib/vhost/vhost_thread.c
create mode 100644 lib/vhost/vhost_thread.h
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 87a8dc3f3e..32472d5ed7 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -15,6 +15,7 @@
#include <rte_thread.h>
#include "fd_man.h"
+#include "vhost_thread.h"
RTE_LOG_REGISTER_SUFFIX(vhost_fdset_logtype, fdset, INFO);
#define RTE_LOGTYPE_VHOST_FDMAN vhost_fdset_logtype
@@ -342,6 +343,7 @@ fdset_event_dispatch(void *arg)
if (numfds < 0)
continue;
+ vhost_thread_read_lock();
for (i = 0; i < numfds; i++) {
pthread_mutex_lock(&pfdset->fd_mutex);
@@ -379,6 +381,7 @@ fdset_event_dispatch(void *arg)
if (remove1 || remove2)
fdset_del(pfdset, fd);
}
+ vhost_thread_read_unlock();
if (pfdset->destroy)
break;
diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 41b622a9be..7bc1840ed0 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -25,6 +25,7 @@ sources = files(
'vdpa.c',
'vhost.c',
'vhost_crypto.c',
+ 'vhost_thread.c',
'vhost_user.c',
'virtio_net.c',
'virtio_net_ctrl.c',
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index a75728a2e4..c9c295c2a3 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -20,6 +20,7 @@
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
+#include "vhost_thread.h"
#include "vhost_user.h"
@@ -456,6 +457,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
+ vhost_thread_read_lock();
pthread_mutex_lock(&reconn_list.mutex);
/*
@@ -487,6 +489,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
}
pthread_mutex_unlock(&reconn_list.mutex);
+ vhost_thread_read_unlock();
sleep(1);
}
@@ -1067,7 +1070,7 @@ rte_vhost_driver_unregister(const char *path)
if (path == NULL)
return -1;
-again:
+ vhost_thread_write_lock();
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
@@ -1079,14 +1082,10 @@ rte_vhost_driver_unregister(const char *path)
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
- * If r/wcb is executing, release vhost_user's
- * mutex lock, and try again since the r/wcb
- * may use the mutex lock.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset, vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, vsocket->socket_fd);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
@@ -1098,17 +1097,10 @@ rte_vhost_driver_unregister(const char *path)
next = TAILQ_NEXT(conn, next);
/*
- * If r/wcb is executing, release vsocket's
- * conn_mutex and vhost_user's mutex locks, and
- * try again since the r/wcb may use the
- * conn_mutex and mutex locks.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, conn->connfd);
VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
close(conn->connfd);
@@ -1130,9 +1122,11 @@ rte_vhost_driver_unregister(const char *path)
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return -1;
}
diff --git a/lib/vhost/vhost_thread.c b/lib/vhost/vhost_thread.c
new file mode 100644
index 0000000000..df439e68e6
--- /dev/null
+++ b/lib/vhost/vhost_thread.c
@@ -0,0 +1,29 @@
+#include <rte_rwlock.h>
+
+#include "vhost_thread.h"
+
+static rte_rwlock_t vhost_thread_lock = RTE_RWLOCK_INITIALIZER;
+
+void
+vhost_thread_read_lock(void)
+{
+ rte_rwlock_read_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_read_unlock(void)
+{
+ rte_rwlock_read_unlock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_lock(void)
+{
+ rte_rwlock_write_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_unlock(void)
+{
+ rte_rwlock_write_unlock(&vhost_thread_lock);
+}
diff --git a/lib/vhost/vhost_thread.h b/lib/vhost/vhost_thread.h
new file mode 100644
index 0000000000..3c44b1c030
--- /dev/null
+++ b/lib/vhost/vhost_thread.h
@@ -0,0 +1,12 @@
+#ifndef _VHOST_THREAD_H_
+#define _VHOST_THREAD_H_
+
+void vhost_thread_read_lock(void);
+
+void vhost_thread_read_unlock(void);
+
+void vhost_thread_write_lock(void);
+
+void vhost_thread_write_unlock(void);
+
+#endif
--
2.32.1 (Apple Git-133)
* [PATCH v4] vhost: fix crash caused by accessing a freed vsocket
2024-04-03 16:05 [PATCH v1] vhost: fix crash caused by accessing a freed vsocket Gongming Chen
` (2 preceding siblings ...)
2024-07-08 2:23 ` [PATCH v3] " Gongming Chen
@ 2024-07-08 4:41 ` Gongming Chen
2024-07-09 7:26 ` David Marchand
3 siblings, 1 reply; 8+ messages in thread
From: Gongming Chen @ 2024-07-08 4:41 UTC (permalink / raw)
To: maxime.coquelin, chenbox, chengongming1900; +Cc: dev, Gongming Chen, stable
From: Gongming Chen <chengm11@chinatelecom.cn>
When vhost-user message handling fails in the event dispatch thread,
the vsocket's reconnection entry is added to the reconn_list of the
reconnection thread.
Since the reconnection, event dispatch and app configuration threads
share no common synchronization, the app config thread can free the
vsocket in rte_vhost_driver_unregister() while the vsocket's
reconnection entry still sits in reconn_list.
The reconnection thread then connects the vsocket again and adds a new
conn to the dispatch thread.
Finally, the vsocket already freed by rte_vhost_driver_unregister() is
accessed again in the event dispatch thread, resulting in a
use-after-free.
This patch adds a vhost thread read-write lock shared by the
reconnection, event dispatch and app configuration threads. When the
vhost driver unregisters, it holds the lock exclusively so the vsocket
can be freed safely.
#0 0x0000000000000025 in ?? ()
#1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:323
#2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:365
Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
Cc: stable@dpdk.org
Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
---
lib/vhost/fd_man.c | 3 +++
lib/vhost/meson.build | 1 +
lib/vhost/socket.c | 30 ++++++++++++------------------
lib/vhost/vhost_thread.c | 33 +++++++++++++++++++++++++++++++++
lib/vhost/vhost_thread.h | 12 ++++++++++++
5 files changed, 61 insertions(+), 18 deletions(-)
create mode 100644 lib/vhost/vhost_thread.c
create mode 100644 lib/vhost/vhost_thread.h
diff --git a/lib/vhost/fd_man.c b/lib/vhost/fd_man.c
index 87a8dc3f3e..32472d5ed7 100644
--- a/lib/vhost/fd_man.c
+++ b/lib/vhost/fd_man.c
@@ -15,6 +15,7 @@
#include <rte_thread.h>
#include "fd_man.h"
+#include "vhost_thread.h"
RTE_LOG_REGISTER_SUFFIX(vhost_fdset_logtype, fdset, INFO);
#define RTE_LOGTYPE_VHOST_FDMAN vhost_fdset_logtype
@@ -342,6 +343,7 @@ fdset_event_dispatch(void *arg)
if (numfds < 0)
continue;
+ vhost_thread_read_lock();
for (i = 0; i < numfds; i++) {
pthread_mutex_lock(&pfdset->fd_mutex);
@@ -379,6 +381,7 @@ fdset_event_dispatch(void *arg)
if (remove1 || remove2)
fdset_del(pfdset, fd);
}
+ vhost_thread_read_unlock();
if (pfdset->destroy)
break;
diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
index 41b622a9be..7bc1840ed0 100644
--- a/lib/vhost/meson.build
+++ b/lib/vhost/meson.build
@@ -25,6 +25,7 @@ sources = files(
'vdpa.c',
'vhost.c',
'vhost_crypto.c',
+ 'vhost_thread.c',
'vhost_user.c',
'virtio_net.c',
'virtio_net_ctrl.c',
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index a75728a2e4..c9c295c2a3 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -20,6 +20,7 @@
#include "fd_man.h"
#include "vduse.h"
#include "vhost.h"
+#include "vhost_thread.h"
#include "vhost_user.h"
@@ -456,6 +457,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
struct vhost_user_reconnect *reconn, *next;
while (1) {
+ vhost_thread_read_lock();
pthread_mutex_lock(&reconn_list.mutex);
/*
@@ -487,6 +489,7 @@ vhost_user_client_reconnect(void *arg __rte_unused)
}
pthread_mutex_unlock(&reconn_list.mutex);
+ vhost_thread_read_unlock();
sleep(1);
}
@@ -1067,7 +1070,7 @@ rte_vhost_driver_unregister(const char *path)
if (path == NULL)
return -1;
-again:
+ vhost_thread_write_lock();
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
@@ -1079,14 +1082,10 @@ rte_vhost_driver_unregister(const char *path)
vduse_device_destroy(path);
} else if (vsocket->is_server) {
/*
- * If r/wcb is executing, release vhost_user's
- * mutex lock, and try again since the r/wcb
- * may use the mutex lock.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset, vsocket->socket_fd) == -1) {
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, vsocket->socket_fd);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
@@ -1098,17 +1097,10 @@ rte_vhost_driver_unregister(const char *path)
next = TAILQ_NEXT(conn, next);
/*
- * If r/wcb is executing, release vsocket's
- * conn_mutex and vhost_user's mutex locks, and
- * try again since the r/wcb may use the
- * conn_mutex and mutex locks.
+ * The vhost thread write lock has been acquired,
+ * and fd must be deleted from fdset.
*/
- if (fdset_try_del(vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(&vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
+ fdset_del(vhost_user.fdset, conn->connfd);
VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
close(conn->connfd);
@@ -1130,9 +1122,11 @@ rte_vhost_driver_unregister(const char *path)
vhost_user.vsockets[i] = vhost_user.vsockets[count];
vhost_user.vsockets[count] = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return 0;
}
pthread_mutex_unlock(&vhost_user.mutex);
+ vhost_thread_write_unlock();
return -1;
}
diff --git a/lib/vhost/vhost_thread.c b/lib/vhost/vhost_thread.c
new file mode 100644
index 0000000000..f3ff182976
--- /dev/null
+++ b/lib/vhost/vhost_thread.c
@@ -0,0 +1,33 @@
+#include <rte_rwlock.h>
+
+#include "vhost_thread.h"
+
+static rte_rwlock_t vhost_thread_lock = RTE_RWLOCK_INITIALIZER;
+
+void
+vhost_thread_read_lock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_read_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_read_unlock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_read_unlock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_lock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_write_lock(&vhost_thread_lock);
+}
+
+void
+vhost_thread_write_unlock(void)
+ __rte_no_thread_safety_analysis
+{
+ rte_rwlock_write_unlock(&vhost_thread_lock);
+}
diff --git a/lib/vhost/vhost_thread.h b/lib/vhost/vhost_thread.h
new file mode 100644
index 0000000000..3c44b1c030
--- /dev/null
+++ b/lib/vhost/vhost_thread.h
@@ -0,0 +1,12 @@
+#ifndef _VHOST_THREAD_H_
+#define _VHOST_THREAD_H_
+
+void vhost_thread_read_lock(void);
+
+void vhost_thread_read_unlock(void);
+
+void vhost_thread_write_lock(void);
+
+void vhost_thread_write_unlock(void);
+
+#endif
--
2.32.1 (Apple Git-133)
* Re: [PATCH v1] vhost: fix crash caused by accessing a freed vsocket
2024-07-02 7:48 ` Maxime Coquelin
@ 2024-07-08 23:50 ` Gongming Chen
0 siblings, 0 replies; 8+ messages in thread
From: Gongming Chen @ 2024-07-08 23:50 UTC (permalink / raw)
To: Maxime Coquelin; +Cc: chenbox, dev, stable
> On Jul 2, 2024, at 3:48 PM, Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>
> Hi Gongming,
>
> On 5/10/24 09:28, Gongming Chen wrote:
>> Hi Maxime and Chenbo,
>> Do you have any suggestions for how to address this?
>> Looking forward to hearing from you!
>
> Could you please have a try with the latest DPDK main branch,
> and if it reproduces, rebase your series on top of it.
>
> I don't think it has been fixed, but we've made significant changes to
> fdman in this release, so we need a rebase anyway.
>
> Thanks in advance,
> Maxime
Hi Maxime,
This bug still exists. I rebased onto the latest main branch and submitted a v4.
Thank you for your review; looking forward to hearing from you!
Thanks,
Gongming
>
>> Thanks,
>> Gongming
>>> On Apr 3, 2024, at 11:52 PM, Gongming Chen <chengongming1900@outlook.com> wrote:
>>>
>>> Hi Maxime,
>>> Thanks for the review.
>>>
>>>> On Apr 3, 2024, at 5:39 PM, Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
>>>>
>>>> Hi Gongming,
>>>>
>>>> It's the 9th time the patch has been sent.
>>>> I'm not sure whether there are changes between them or these are just
>>>> re-sends, but that's something to avoid.
>>>>
>>>
>>> Sorry, there's something wrong with my mailbox.
>>> I will send a v1 as the latest patch; the content is actually the same.
>>>
>>>> If there are differences, you should use versioning to highlight them.
>>>> If unsure, please check the contribution guidelines first.
>>>>
>>>> Regarding the patch itself, I don't know if this is avoidable, but I
>>>> would prefer we do not introduce yet another lock in there.
>>>>
>>>> Thanks,
>>>> Maxime
>>>>
>>>
>>> I totally agree with you.
>>> Initially I hoped to solve this problem without introducing a new
>>> lock, but the result was not as expected.
>>>
>>> 1. The vsocket is shared between the event and reconnect threads by
>>> passing the vsocket pointer, so there is no way to protect the
>>> vsocket with a simple per-vsocket lock.
>>>
>>> 2. The event and reconnect threads can pass vsocket pointers to each
>>> other, so locking the two threads separately cannot ensure that the
>>> vsocket is no longer accessed.
>>>
>>> 3. Therefore, with respect to the vsocket resource, the event and
>>> reconnect threads share one critical section. Only by locking both
>>> threads at the same time can we ensure that the vsocket is no longer
>>> accessed and can be freed safely.
>>>
>>> Currently, the app config, event, and reconnect threads each have a
>>> lock for the resources they maintain: vhost_user.mutex,
>>> pfdset->fd_mutex, and reconn_list.mutex respectively.
>>>
>>> What is missing is a thread-level lock protecting the critical
>>> sections between threads, much like RCU grace-period protection.
>>>
>>> Once the app config thread acquires the write lock, the event and
>>> reconnect threads are guaranteed to be outside their critical
>>> sections, so it can completely clean up the resources associated with
>>> the vsocket and free it safely.
>>>
>>> Considering future expansion, if more resources like the vsocket
>>> appear, this thread lock can likewise ensure they are released only
>>> after complete cleanup.
>>>
>>> In this way, the threading model becomes clearer, and the complicated
>>> try-lock method is no longer needed.
>>>
>>> Thanks,
>>> Gongming
>
>
* Re: [PATCH v4] vhost: fix crash caused by accessing a freed vsocket
2024-07-08 4:41 ` [PATCH v4] " Gongming Chen
@ 2024-07-09 7:26 ` David Marchand
0 siblings, 0 replies; 8+ messages in thread
From: David Marchand @ 2024-07-09 7:26 UTC (permalink / raw)
To: Gongming Chen
Cc: maxime.coquelin, chenbox, dev, Gongming Chen, stable, Thomas Monjalon
Hello,
On Mon, Jul 8, 2024 at 6:41 AM Gongming Chen
<chengongming1900@outlook.com> wrote:
>
> From: Gongming Chen <chengm11@chinatelecom.cn>
>
> When vhost-user message handling fails in the event dispatch thread,
> the vsocket's reconnection entry is added to the reconn_list of the
> reconnection thread.
> Since the reconnection, event dispatch and app configuration threads
> share no common synchronization, the app config thread can free the
> vsocket in rte_vhost_driver_unregister() while the vsocket's
> reconnection entry still sits in reconn_list.
> The reconnection thread then connects the vsocket again and adds a new
> conn to the dispatch thread.
> Finally, the vsocket already freed by rte_vhost_driver_unregister() is
> accessed again in the event dispatch thread, resulting in a
> use-after-free.
>
> This patch adds a vhost thread read-write lock shared by the
> reconnection, event dispatch and app configuration threads. When the
> vhost driver unregisters, it holds the lock exclusively so the vsocket
> can be freed safely.
>
> #0 0x0000000000000025 in ?? ()
> #1 0x0000000003ed7ca0 in vhost_user_read_cb at lib/vhost/socket.c:323
> #2 0x0000000003ed625f in fdset_event_dispatch at lib/vhost/fd_man.c:365
>
> Fixes: e623e0c6d8a5 ("vhost: add vhost-user client mode")
> Cc: stable@dpdk.org
>
> Signed-off-by: Gongming Chen <chengm11@chinatelecom.cn>
Maxime is off for the coming weeks.
Adding a new lock is risky at this point in the release, especially as
it is mixed with other locks.
I prefer not to take this fix without an in-depth review, and ideally
an ack from Maxime.
I marked this patch as deferred to the next release.
--
David Marchand
Thread overview: 8 messages
2024-04-03 16:05 [PATCH v1] vhost: fix crash caused by accessing a freed vsocket Gongming Chen
2024-05-10 7:28 ` Gongming Chen
2024-07-02 7:48 ` Maxime Coquelin
2024-07-08 23:50 ` Gongming Chen
2024-07-08 2:17 ` [PATCH v2] " Gongming Chen
2024-07-08 2:23 ` [PATCH v3] " Gongming Chen
2024-07-08 4:41 ` [PATCH v4] " Gongming Chen
2024-07-09 7:26 ` David Marchand