From: Nikos Dragazis <ndragazis@arrikto.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
Tiwei Bie <tiwei.bie@intel.com>,
Zhihong Wang <zhihong.wang@intel.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Wei Wang <wei.w.wang@intel.com>,
Stojaczyk Dariusz <dariusz.stojaczyk@intel.com>,
Vangelis Koukis <vkoukis@arrikto.com>
Subject: [dpdk-dev] [PATCH 06/28] vhost: move vhost-user connection
Date: Wed, 19 Jun 2019 18:14:31 +0300
Message-ID: <1560957293-17294-7-git-send-email-ndragazis@arrikto.com>
In-Reply-To: <1560957293-17294-1-git-send-email-ndragazis@arrikto.com>

The AF_UNIX transport can accept multiple client connections on a server
socket. Each connection instantiates a separate vhost-user device, whose
state is tracked in a struct vhost_user_connection. This behavior is
specific to AF_UNIX; other transports may not support N connections per
socket endpoint.
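
For context, the accept path follows the familiar per-connection pattern
sketched below. This is a minimal illustration only, not the code in this
patch; listen_fd and handle_connection() are hypothetical placeholders
(requires <sys/socket.h>, <stdlib.h>, <unistd.h>):

    for (;;) {
            int connfd = accept(listen_fd, NULL, NULL);
            if (connfd < 0)
                    continue;        /* transient error, keep listening */

            /* Every accepted connection gets its own device state. */
            struct vhost_user_connection *conn = malloc(sizeof(*conn));
            if (conn == NULL) {
                    close(connfd);
                    continue;
            }
            conn->connfd = connfd;
            handle_connection(conn); /* create vhost device, register fd */
    }
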
Move struct vhost_user_connection to trans_af_unix.c and
conn_list/conn_mutex into struct af_unix_socket.
Signed-off-by: Nikos Dragazis <ndragazis@arrikto.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
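
Reviewer note: the diff recovers the transport-private state from the
generic vsocket pointer with the struct-embedding idiom. A minimal sketch
of the pattern is shown below, assuming the textbook container_of
definition (DPDK provides an equivalent macro):

    #include <stddef.h>

    /* Map a pointer to an embedded member back to its enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /*
     * 'socket' must remain the first field so that a struct
     * af_unix_socket * can also be used where a struct
     * vhost_user_socket * is expected; container_of() itself would
     * work for any member offset.
     */
    struct af_unix_socket *af_vsocket =
            container_of(vsocket, struct af_unix_socket, socket);

With conn_list and conn_mutex reachable only through this cast,
struct vhost_user_connection becomes private to trans_af_unix.c.
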
lib/librte_vhost/socket.c | 54 +++---------------------------
lib/librte_vhost/trans_af_unix.c | 72 ++++++++++++++++++++++++++++++++++++----
lib/librte_vhost/vhost.h | 19 ++---------
3 files changed, 74 insertions(+), 71 deletions(-)
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index df6d707..976343c 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -341,13 +341,6 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vhost_user_socket_mem_free(vsocket);
goto out;
}
- TAILQ_INIT(&vsocket->conn_list);
- ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
- if (ret) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "error: failed to init connection mutex\n");
- goto out_free;
- }
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
/*
@@ -395,7 +388,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
RTE_LOG(ERR, VHOST_CONFIG,
"Postcopy requested but not compiled\n");
ret = -1;
- goto out_mutex;
+ goto out_free;
#endif
}
@@ -403,14 +396,14 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
if (vhost_user_reconnect_init() != 0)
- goto out_mutex;
+ goto out_free;
}
} else {
vsocket->is_server = true;
}
ret = trans_ops->socket_init(vsocket, flags);
if (ret < 0) {
- goto out_mutex;
+ goto out_free;
}
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
@@ -418,11 +411,6 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
-out_mutex:
- if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "error: failed to destroy connection mutex\n");
- }
out_free:
vhost_user_socket_mem_free(vsocket);
out:
@@ -439,51 +427,19 @@ rte_vhost_driver_unregister(const char *path)
{
int i;
int count;
- struct vhost_user_connection *conn, *next;
if (path == NULL)
return -1;
-again:
pthread_mutex_lock(&vhost_user.mutex);
for (i = 0; i < vhost_user.vsocket_cnt; i++) {
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (!strcmp(vsocket->path, path)) {
- pthread_mutex_lock(&vsocket->conn_mutex);
- for (conn = TAILQ_FIRST(&vsocket->conn_list);
- conn != NULL;
- conn = next) {
- next = TAILQ_NEXT(conn, next);
-
- /*
- * If r/wcb is executing, release the
- * conn_mutex lock, and try again since
- * the r/wcb may use the conn_mutex lock.
- */
- if (fdset_try_del(&vhost_user.fdset,
- conn->connfd) == -1) {
- pthread_mutex_unlock(
- &vsocket->conn_mutex);
- pthread_mutex_unlock(&vhost_user.mutex);
- goto again;
- }
-
- RTE_LOG(INFO, VHOST_CONFIG,
- "free connfd = %d for device '%s'\n",
- conn->connfd, path);
- close(conn->connfd);
- vhost_destroy_device(conn->vid);
- TAILQ_REMOVE(&vsocket->conn_list, conn, next);
- free(conn);
- }
- pthread_mutex_unlock(&vsocket->conn_mutex);
-
vsocket->trans_ops->socket_cleanup(vsocket);
-
- pthread_mutex_destroy(&vsocket->conn_mutex);
- vhost_user_socket_mem_free(vsocket);
+ free(vsocket->path);
+ free(vsocket);
count = --vhost_user.vsocket_cnt;
vhost_user.vsockets[i] = vhost_user.vsockets[count];
diff --git a/lib/librte_vhost/trans_af_unix.c b/lib/librte_vhost/trans_af_unix.c
index 93d11f7..58fc9e2 100644
--- a/lib/librte_vhost/trans_af_unix.c
+++ b/lib/librte_vhost/trans_af_unix.c
@@ -15,8 +15,20 @@
#define MAX_VIRTIO_BACKLOG 128
+TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
+
+struct vhost_user_connection {
+ struct vhost_user_socket *vsocket;
+ int connfd;
+ int vid;
+
+ TAILQ_ENTRY(vhost_user_connection) next;
+};
+
struct af_unix_socket {
struct vhost_user_socket socket; /* must be the first field! */
+ struct vhost_user_connection_list conn_list;
+ pthread_mutex_t conn_mutex;
int socket_fd;
struct sockaddr_un un;
};
@@ -131,6 +143,8 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
+ struct af_unix_socket *af_vsocket =
+ container_of(vsocket, struct af_unix_socket, socket);
int vid;
size_t size;
struct vhost_user_connection *conn;
@@ -188,9 +202,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
goto err_cleanup;
}
- pthread_mutex_lock(&vsocket->conn_mutex);
- TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_lock(&af_vsocket->conn_mutex);
+ TAILQ_INSERT_TAIL(&af_vsocket->conn_list, conn, next);
+ pthread_mutex_unlock(&af_vsocket->conn_mutex);
fdset_pipe_notify(&vhost_user.fdset);
return;
@@ -221,6 +235,8 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
{
struct vhost_user_connection *conn = dat;
struct vhost_user_socket *vsocket = conn->vsocket;
+ struct af_unix_socket *af_vsocket =
+ container_of(vsocket, struct af_unix_socket, socket);
int ret;
ret = vhost_user_msg_handler(conn->vid, connfd);
@@ -238,9 +254,9 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
vhost_destroy_device(conn->vid);
- pthread_mutex_lock(&vsocket->conn_mutex);
- TAILQ_REMOVE(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_lock(&af_vsocket->conn_mutex);
+ TAILQ_REMOVE(&af_vsocket->conn_list, conn, next);
+ pthread_mutex_unlock(&af_vsocket->conn_mutex);
free(conn);
@@ -512,6 +528,18 @@ static int
af_unix_socket_init(struct vhost_user_socket *vsocket,
uint64_t flags __rte_unused)
{
+ struct af_unix_socket *af_vsocket =
+ container_of(vsocket, struct af_unix_socket, socket);
+ int ret;
+
+ TAILQ_INIT(&af_vsocket->conn_list);
+ ret = pthread_mutex_init(&af_vsocket->conn_mutex, NULL);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to init connection mutex\n");
+ return -1;
+ }
+
return create_unix_socket(vsocket);
}
@@ -520,6 +548,7 @@ af_unix_socket_cleanup(struct vhost_user_socket *vsocket)
{
struct af_unix_socket *af_vsocket =
container_of(vsocket, struct af_unix_socket, socket);
+ struct vhost_user_connection *conn, *next;
if (vsocket->is_server) {
fdset_del(&vhost_user.fdset, af_vsocket->socket_fd);
@@ -528,6 +557,37 @@ af_unix_socket_cleanup(struct vhost_user_socket *vsocket)
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
+
+again:
+ pthread_mutex_lock(&af_vsocket->conn_mutex);
+ for (conn = TAILQ_FIRST(&af_vsocket->conn_list);
+ conn != NULL;
+ conn = next) {
+ next = TAILQ_NEXT(conn, next);
+
+ /*
+ * If r/wcb is executing, release the
+ * conn_mutex lock, and try again since
+ * the r/wcb may use the conn_mutex lock.
+ */
+ if (fdset_try_del(&vhost_user.fdset,
+ conn->connfd) == -1) {
+ pthread_mutex_unlock(
+ &af_vsocket->conn_mutex);
+ goto again;
+ }
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "free connfd = %d for device '%s'\n",
+ conn->connfd, vsocket->path);
+ close(conn->connfd);
+ vhost_destroy_device(conn->vid);
+ TAILQ_REMOVE(&af_vsocket->conn_list, conn, next);
+ free(conn);
+ }
+ pthread_mutex_unlock(&af_vsocket->conn_mutex);
+
+ pthread_mutex_destroy(&af_vsocket->conn_mutex);
}
static int
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index c74753b..5c3987d 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -404,13 +404,10 @@ struct virtio_net {
struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
-/* The vhost_user, vhost_user_socket, vhost_user_connection, and reconnect
- * declarations are temporary measures for moving AF_UNIX code into
- * trans_af_unix.c. They will be cleaned up as socket.c is untangled from
- * trans_af_unix.c.
+/* The vhost_user, vhost_user_socket, and reconnect declarations are temporary
+ * measures for moving AF_UNIX code into trans_af_unix.c. They will be cleaned
+ * up as socket.c is untangled from trans_af_unix.c.
*/
-TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
-
/*
* Every time rte_vhost_driver_register() is invoked, an associated
* vhost_user_socket struct will be created.
@@ -421,8 +418,6 @@ TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
* struct.
*/
struct vhost_user_socket {
- struct vhost_user_connection_list conn_list;
- pthread_mutex_t conn_mutex;
char *path;
bool is_server;
bool reconnect;
@@ -453,14 +448,6 @@ struct vhost_user_socket {
struct vhost_transport_ops const *trans_ops;
};
-struct vhost_user_connection {
- struct vhost_user_socket *vsocket;
- int connfd;
- int vid;
-
- TAILQ_ENTRY(vhost_user_connection) next;
-};
-
#define MAX_VHOST_SOCKET 1024
struct vhost_user {
struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
--
2.7.4