From: Stefan Hajnoczi <stefanha@redhat.com>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, Yuanhan Liu <yliu@fridaylinux.org>,
wei.w.wang@intel.com, mst@redhat.com, zhiyong.yang@intel.com,
jasowang@redhat.com, Stefan Hajnoczi <stefanha@redhat.com>
Subject: [dpdk-dev] [RFC 06/24] vhost: move vhost_user_connection to trans_af_unix.c
Date: Fri, 19 Jan 2018 13:44:26 +0000
Message-ID: <20180119134444.24927-7-stefanha@redhat.com>
In-Reply-To: <20180119134444.24927-1-stefanha@redhat.com>
The AF_UNIX transport can accept multiple client connections on a server
socket. Each connection instantiates a separate vhost-user device,
which is stored as a vhost_user_connection. This behavior is specific
to AF_UNIX; other transports may not support N connections per
socket endpoint.
Move struct vhost_user_connection into trans_af_unix.c, and move the
conn_list/conn_mutex fields from struct vhost_user_socket into
struct af_unix_socket.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
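Reviewer note (placed below the '---', so it is not part of the commit
message): the pattern this patch relies on is recovering the
transport-specific state from the generic vsocket pointer with
container_of(), which works because struct vhost_user_socket is embedded
as the first member of struct af_unix_socket. Below is a minimal,
self-contained sketch of the idiom using simplified stand-in types (not
the real librte_vhost definitions) and a local container_of() macro
rather than a project-provided helper:

#include <stddef.h>     /* offsetof() */
#include <pthread.h>
#include <sys/queue.h>  /* TAILQ_* macros */

/* Simplified stand-ins for the real librte_vhost types. */
struct vhost_user_socket {
	char *path;
};

struct vhost_user_connection {
	struct vhost_user_socket *vsocket;
	int connfd;
	int vid;

	TAILQ_ENTRY(vhost_user_connection) next;
};

TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);

struct af_unix_socket {
	struct vhost_user_socket socket; /* must be the first field! */
	struct vhost_user_connection_list conn_list;
	pthread_mutex_t conn_mutex;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Recover the AF_UNIX transport state from the generic pointer that
 * socket.c hands to the transport ops.
 */
static struct af_unix_socket *
af_unix_socket_of(struct vhost_user_socket *vsocket)
{
	return container_of(vsocket, struct af_unix_socket, socket);
}

Because 'socket' is the first member, the pointer adjustment is a no-op
at runtime, but container_of() keeps the conversion correct even if the
member were to move within the struct.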
lib/librte_vhost/vhost.h | 19 ++-----------
lib/librte_vhost/socket.c | 36 ++----------------------
lib/librte_vhost/trans_af_unix.c | 60 ++++++++++++++++++++++++++++++++++++----
3 files changed, 59 insertions(+), 56 deletions(-)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 7cbef04ab..734a8721d 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -302,13 +302,10 @@ struct virtio_net {
int slave_req_fd;
} __rte_cache_aligned;
-/* The vhost_user, vhost_user_socket, vhost_user_connection, and reconnect
- * declarations are temporary measures for moving AF_UNIX code into
- * trans_af_unix.c. They will be cleaned up as socket.c is untangled from
- * trans_af_unix.c.
+/* The vhost_user, vhost_user_socket, and reconnect declarations are temporary
+ * measures for moving AF_UNIX code into trans_af_unix.c. They will be cleaned
+ * up as socket.c is untangled from trans_af_unix.c.
*/
-TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
-
/*
* Every time rte_vhost_driver_register() is invoked, an associated
* vhost_user_socket struct will be created.
@@ -319,8 +316,6 @@ TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
* struct.
*/
struct vhost_user_socket {
- struct vhost_user_connection_list conn_list;
- pthread_mutex_t conn_mutex;
char *path;
bool is_server;
bool reconnect;
@@ -341,14 +336,6 @@ struct vhost_user_socket {
struct vhost_transport_ops const *trans_ops;
};
-struct vhost_user_connection {
- struct vhost_user_socket *vsocket;
- int connfd;
- int vid;
-
- TAILQ_ENTRY(vhost_user_connection) next;
-};
-
#define MAX_VHOST_SOCKET 1024
struct vhost_user {
struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index f8a96ab5f..4fd86fd5b 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -153,13 +153,6 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
free(vsocket);
goto out;
}
- TAILQ_INIT(&vsocket->conn_list);
- ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
- if (ret) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "error: failed to init connection mutex\n");
- goto out_free;
- }
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
/*
@@ -186,14 +179,14 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
if (vhost_user_reconnect_init() != 0)
- goto out_mutex;
+ goto out_free;
}
} else {
vsocket->is_server = true;
}
ret = trans_ops->socket_init(vsocket, flags);
if (ret < 0) {
- goto out_mutex;
+ goto out_free;
}
vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
@@ -201,11 +194,6 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
pthread_mutex_unlock(&vhost_user.mutex);
return ret;
-out_mutex:
- if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "error: failed to destroy connection mutex\n");
- }
out_free:
free(vsocket->path);
free(vsocket);
@@ -223,7 +211,6 @@ rte_vhost_driver_unregister(const char *path)
{
int i;
int count;
- struct vhost_user_connection *conn, *next;
pthread_mutex_lock(&vhost_user.mutex);
@@ -232,25 +219,6 @@ rte_vhost_driver_unregister(const char *path)
if (!strcmp(vsocket->path, path)) {
vsocket->trans_ops->socket_cleanup(vsocket);
-
- pthread_mutex_lock(&vsocket->conn_mutex);
- for (conn = TAILQ_FIRST(&vsocket->conn_list);
- conn != NULL;
- conn = next) {
- next = TAILQ_NEXT(conn, next);
-
- fdset_del(&vhost_user.fdset, conn->connfd);
- RTE_LOG(INFO, VHOST_CONFIG,
- "free connfd = %d for device '%s'\n",
- conn->connfd, path);
- close(conn->connfd);
- vhost_destroy_device(conn->vid);
- TAILQ_REMOVE(&vsocket->conn_list, conn, next);
- free(conn);
- }
- pthread_mutex_unlock(&vsocket->conn_mutex);
-
- pthread_mutex_destroy(&vsocket->conn_mutex);
free(vsocket->path);
free(vsocket);
diff --git a/lib/librte_vhost/trans_af_unix.c b/lib/librte_vhost/trans_af_unix.c
index 6c22093a4..747fd9690 100644
--- a/lib/librte_vhost/trans_af_unix.c
+++ b/lib/librte_vhost/trans_af_unix.c
@@ -44,8 +44,20 @@
#define MAX_VIRTIO_BACKLOG 128
+TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
+
+struct vhost_user_connection {
+ struct vhost_user_socket *vsocket;
+ int connfd;
+ int vid;
+
+ TAILQ_ENTRY(vhost_user_connection) next;
+};
+
struct af_unix_socket {
struct vhost_user_socket socket; /* must be the first field! */
+ struct vhost_user_connection_list conn_list;
+ pthread_mutex_t conn_mutex;
int socket_fd;
struct sockaddr_un un;
};
@@ -144,6 +156,8 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
+ struct af_unix_socket *s =
+ container_of(vsocket, struct af_unix_socket, socket);
int vid;
size_t size;
struct vhost_user_connection *conn;
@@ -194,9 +208,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
goto err;
}
- pthread_mutex_lock(&vsocket->conn_mutex);
- TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_lock(&s->conn_mutex);
+ TAILQ_INSERT_TAIL(&s->conn_list, conn, next);
+ pthread_mutex_unlock(&s->conn_mutex);
return;
err:
@@ -223,6 +237,8 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
{
struct vhost_user_connection *conn = dat;
struct vhost_user_socket *vsocket = conn->vsocket;
+ struct af_unix_socket *s =
+ container_of(vsocket, struct af_unix_socket, socket);
int ret;
ret = vhost_user_msg_handler(conn->vid, connfd);
@@ -234,9 +250,9 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
- pthread_mutex_lock(&vsocket->conn_mutex);
- TAILQ_REMOVE(&vsocket->conn_list, conn, next);
- pthread_mutex_unlock(&vsocket->conn_mutex);
+ pthread_mutex_lock(&s->conn_mutex);
+ TAILQ_REMOVE(&s->conn_list, conn, next);
+ pthread_mutex_unlock(&s->conn_mutex);
free(conn);
@@ -507,6 +523,18 @@ static int
af_unix_socket_init(struct vhost_user_socket *vsocket,
uint64_t flags __rte_unused)
{
+ struct af_unix_socket *s =
+ container_of(vsocket, struct af_unix_socket, socket);
+ int ret;
+
+ TAILQ_INIT(&s->conn_list);
+ ret = pthread_mutex_init(&s->conn_mutex, NULL);
+ if (ret) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: failed to init connection mutex\n");
+ return -1;
+ }
+
return create_unix_socket(vsocket);
}
@@ -515,6 +543,7 @@ af_unix_socket_cleanup(struct vhost_user_socket *vsocket)
{
struct af_unix_socket *s =
container_of(vsocket, struct af_unix_socket, socket);
+ struct vhost_user_connection *conn, *next;
if (vsocket->is_server) {
fdset_del(&vhost_user.fdset, s->socket_fd);
@@ -523,6 +552,25 @@ af_unix_socket_cleanup(struct vhost_user_socket *vsocket)
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
+
+ pthread_mutex_lock(&s->conn_mutex);
+ for (conn = TAILQ_FIRST(&s->conn_list);
+ conn != NULL;
+ conn = next) {
+ next = TAILQ_NEXT(conn, next);
+
+ fdset_del(&vhost_user.fdset, conn->connfd);
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "free connfd = %d for device '%s'\n",
+ conn->connfd, vsocket->path);
+ close(conn->connfd);
+ vhost_destroy_device(conn->vid);
+ TAILQ_REMOVE(&s->conn_list, conn, next);
+ free(conn);
+ }
+ pthread_mutex_unlock(&s->conn_mutex);
+
+ pthread_mutex_destroy(&s->conn_mutex);
}
static int
--
2.14.3