From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 3/7] vhost: improve socket layer logs
Date: Thu, 23 Dec 2021 09:36:55 +0100
Message-ID: <20211223083659.245766-4-maxime.coquelin@redhat.com>
In-Reply-To: <20211223083659.245766-1-maxime.coquelin@redhat.com>
This patch prefixes the socket layer's log messages with the
Vhost socket path whenever possible, so that messages can be
attributed to the right device when multiple Vhost devices
are in use.
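
For example, assuming the usual "VHOST_CONFIG: " prefix added by
the VHOST_LOG_CONFIG() macro and a hypothetical socket path
/tmp/vhost-user0, the new-device message printed from
vhost_user_add_connection() changes from:

    VHOST_CONFIG: new device, handle is 0, path is /tmp/vhost-user0

to:

    VHOST_CONFIG: (/tmp/vhost-user0) new device, handle is 0

Messages logged before the socket is known, such as recvmsg()
failures, report the file descriptor instead. These INFO-level
messages can be enabled at runtime with the EAL --log-level
option, e.g. --log-level=lib.vhost.config:info (the exact log
type name may differ depending on the DPDK version).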
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/socket.c | 129 ++++++++++++++++++---------------------------
1 file changed, 51 insertions(+), 78 deletions(-)
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 82963c1e6d..fd6b3a3ee4 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -124,12 +124,12 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,
ret = recvmsg(sockfd, &msgh, 0);
if (ret <= 0) {
if (ret)
- VHOST_LOG_CONFIG(ERR, "recvmsg failed\n");
+ VHOST_LOG_CONFIG(ERR, "recvmsg failed (fd %d)\n", sockfd);
return ret;
}
if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
- VHOST_LOG_CONFIG(ERR, "truncated msg\n");
+ VHOST_LOG_CONFIG(ERR, "truncated msg (fd %d)\n", sockfd);
return -1;
}
@@ -192,7 +192,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
} while (ret < 0 && errno == EINTR);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR, "sendmsg error\n");
+ VHOST_LOG_CONFIG(ERR, "sendmsg error (fd %d)\n", sockfd);
return ret;
}
@@ -243,14 +243,14 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
dev->async_copy = 1;
}
- VHOST_LOG_CONFIG(INFO, "new device, handle is %d, path is %s\n", vid, vsocket->path);
+ VHOST_LOG_CONFIG(INFO, "(%s) new device, handle is %d\n", vsocket->path, vid);
if (vsocket->notify_ops->new_connection) {
ret = vsocket->notify_ops->new_connection(vid);
if (ret < 0) {
VHOST_LOG_CONFIG(ERR,
- "failed to add vhost user connection with fd %d\n",
- fd);
+ "(%s) failed to add vhost user connection with fd %d\n",
+ vsocket->path, fd);
goto err_cleanup;
}
}
@@ -261,9 +261,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
NULL, conn);
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to add fd %d into vhost server fdset\n",
- fd);
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to add fd %d into vhost server fdset\n",
+ vsocket->path, fd);
if (vsocket->notify_ops->destroy_connection)
vsocket->notify_ops->destroy_connection(conn->vid);
@@ -295,7 +294,8 @@ vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
if (fd < 0)
return;
- VHOST_LOG_CONFIG(INFO, "new vhost user connection is %d\n", fd);
+ VHOST_LOG_CONFIG(INFO, "(%s) new vhost user connection is %d\n",
+ vsocket->path, fd);
vhost_user_add_connection(fd, vsocket);
}
@@ -343,13 +343,13 @@ create_unix_socket(struct vhost_user_socket *vsocket)
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0)
return -1;
- VHOST_LOG_CONFIG(INFO, "vhost-user %s: socket created, fd: %d\n",
- vsocket->is_server ? "server" : "client", fd);
+ VHOST_LOG_CONFIG(INFO, "(%s) vhost-user %s: socket created, fd: %d\n",
+ vsocket->path, vsocket->is_server ? "server" : "client", fd);
if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
VHOST_LOG_CONFIG(ERR,
- "vhost-user: can't set nonblocking mode for socket, fd: "
- "%d (%s)\n", fd, strerror(errno));
+ "(%s) vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\n",
+ vsocket->path, fd, strerror(errno));
close(fd);
return -1;
}
@@ -382,12 +382,11 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)
*/
ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
if (ret < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to bind to %s: %s; remove it and try again\n",
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to bind: %s; remove it and try again\n",
path, strerror(errno));
goto err;
}
- VHOST_LOG_CONFIG(INFO, "bind to %s\n", path);
+ VHOST_LOG_CONFIG(INFO, "(%s) binding succeeded\n", path);
ret = listen(fd, MAX_VIRTIO_BACKLOG);
if (ret < 0)
@@ -397,8 +396,8 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)
NULL, vsocket);
if (ret < 0) {
VHOST_LOG_CONFIG(ERR,
- "failed to add listen fd %d to vhost server fdset\n",
- fd);
+ "(%s) failed to add listen fd %d to vhost server fdset\n",
+ path, fd);
goto err;
}
@@ -437,13 +436,11 @@ vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
flags = fcntl(fd, F_GETFL, 0);
if (flags < 0) {
- VHOST_LOG_CONFIG(ERR,
- "can't get flags for connfd %d\n", fd);
+ VHOST_LOG_CONFIG(ERR, "can't get flags for connfd %d\n", fd);
return -2;
}
if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
- VHOST_LOG_CONFIG(ERR,
- "can't disable nonblocking on fd %d\n", fd);
+ VHOST_LOG_CONFIG(ERR, "can't disable nonblocking on fd %d\n", fd);
return -2;
}
return 0;
@@ -471,16 +468,14 @@ vhost_user_client_reconnect(void *arg __rte_unused)
sizeof(reconn->un));
if (ret == -2) {
close(reconn->fd);
- VHOST_LOG_CONFIG(ERR,
- "reconnection for fd %d failed\n",
- reconn->fd);
+ VHOST_LOG_CONFIG(ERR, "(%s) reconnection for fd %d failed\n",
+ reconn->vsocket->path, reconn->fd);
goto remove_fd;
}
if (ret == -1)
continue;
- VHOST_LOG_CONFIG(INFO,
- "%s: connected\n", reconn->vsocket->path);
+ VHOST_LOG_CONFIG(INFO, "%s: connected\n", reconn->vsocket->path);
vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
TAILQ_REMOVE(&reconn_list.head, reconn, next);
@@ -510,10 +505,8 @@ vhost_user_reconnect_init(void)
vhost_user_client_reconnect, NULL);
if (ret != 0) {
VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread");
- if (pthread_mutex_destroy(&reconn_list.mutex)) {
- VHOST_LOG_CONFIG(ERR,
- "failed to destroy reconnect mutex");
- }
+ if (pthread_mutex_destroy(&reconn_list.mutex))
+ VHOST_LOG_CONFIG(ERR, "failed to destroy reconnect mutex");
}
return ret;
@@ -534,20 +527,17 @@ vhost_user_start_client(struct vhost_user_socket *vsocket)
return 0;
}
- VHOST_LOG_CONFIG(WARNING,
- "failed to connect to %s: %s\n",
- path, strerror(errno));
+ VHOST_LOG_CONFIG(WARNING, "(%s) failed to connect: %s\n", path, strerror(errno));
if (ret == -2 || !vsocket->reconnect) {
close(fd);
return -1;
}
- VHOST_LOG_CONFIG(INFO, "%s: reconnecting...\n", path);
+ VHOST_LOG_CONFIG(INFO, "(%s) reconnecting...\n", path);
reconn = malloc(sizeof(*reconn));
if (reconn == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "failed to allocate memory for reconnect\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for reconnect\n", path);
close(fd);
return -1;
}
@@ -701,8 +691,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR,
- "socket file %s is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
ret = -1;
goto unlock_exit;
}
@@ -714,9 +703,7 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
}
if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to get vdpa features "
- "for socket file %s.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa features for socket file.\n", path);
ret = -1;
goto unlock_exit;
}
@@ -754,8 +741,7 @@ rte_vhost_driver_get_protocol_features(const char *path,
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR,
- "socket file %s is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
ret = -1;
goto unlock_exit;
}
@@ -768,9 +754,8 @@ rte_vhost_driver_get_protocol_features(const char *path,
if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
&vdpa_protocol_features) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to get vdpa protocol features "
- "for socket file %s.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa protocol features.\n",
+ path);
ret = -1;
goto unlock_exit;
}
@@ -794,8 +779,7 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (!vsocket) {
- VHOST_LOG_CONFIG(ERR,
- "socket file %s is not registered yet.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) socket file is not registered yet.\n", path);
ret = -1;
goto unlock_exit;
}
@@ -807,9 +791,8 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
}
if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to get vdpa queue number "
- "for socket file %s.\n", path);
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to get vdpa queue number.\n",
+ path);
ret = -1;
goto unlock_exit;
}
@@ -852,8 +835,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
pthread_mutex_lock(&vhost_user.mutex);
if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
- VHOST_LOG_CONFIG(ERR,
- "error: the number of vhost sockets reaches maximum\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) the number of vhost sockets reaches maximum\n",
+ path);
goto out;
}
@@ -863,16 +846,14 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
if (vsocket->path == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "error: failed to copy socket path string\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to copy socket path string\n", path);
vhost_user_socket_mem_free(vsocket);
goto out;
}
TAILQ_INIT(&vsocket->conn_list);
ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
if (ret) {
- VHOST_LOG_CONFIG(ERR,
- "error: failed to init connection mutex\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to init connection mutex\n", path);
goto out_free;
}
vsocket->vdpa_dev = NULL;
@@ -884,9 +865,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
if (vsocket->async_copy &&
(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
- VHOST_LOG_CONFIG(ERR, "error: enabling async copy and IOMMU "
- "or post-copy feature simultaneously is not "
- "supported\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) enabling async copy and IOMMU "
+ "or post-copy feature simultaneously is not supported\n", path);
goto out_mutex;
}
@@ -910,8 +890,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
if (vsocket->async_copy) {
vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
- VHOST_LOG_CONFIG(INFO,
- "Logging feature is disabled in async copy mode\n");
+ VHOST_LOG_CONFIG(INFO, "(%s) logging feature is disabled in async copy mode\n",
+ path);
}
/*
@@ -925,9 +905,8 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
(1ULL << VIRTIO_NET_F_HOST_TSO6) |
(1ULL << VIRTIO_NET_F_HOST_UFO);
- VHOST_LOG_CONFIG(INFO,
- "Linear buffers requested without external buffers, "
- "disabling host segmentation offloading support\n");
+ VHOST_LOG_CONFIG(INFO, "(%s) Linear buffers requested without external buffers, "
+ "disabling host segmentation offloading support\n", path);
vsocket->supported_features &= ~seg_offload_features;
vsocket->features &= ~seg_offload_features;
}
@@ -942,8 +921,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
} else {
#ifndef RTE_LIBRTE_VHOST_POSTCOPY
- VHOST_LOG_CONFIG(ERR,
- "Postcopy requested but not compiled\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) Postcopy requested but not compiled\n", path);
ret = -1;
goto out_mutex;
#endif
@@ -970,8 +948,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
out_mutex:
if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
- VHOST_LOG_CONFIG(ERR,
- "error: failed to destroy connection mutex\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to destroy connection mutex\n", path);
}
out_free:
vhost_user_socket_mem_free(vsocket);
@@ -1059,9 +1036,7 @@ rte_vhost_driver_unregister(const char *path)
goto again;
}
- VHOST_LOG_CONFIG(INFO,
- "free connfd = %d for device '%s'\n",
- conn->connfd, path);
+ VHOST_LOG_CONFIG(INFO, "(%s) free connfd %d\n", path, conn->connfd);
close(conn->connfd);
vhost_destroy_device(conn->vid);
TAILQ_REMOVE(&vsocket->conn_list, conn, next);
@@ -1137,8 +1112,7 @@ rte_vhost_driver_start(const char *path)
* rebuild the wait list of poll.
*/
if (fdset_pipe_init(&vhost_user.fdset) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to create pipe for vhost fdset\n");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to create pipe for vhost fdset\n", path);
return -1;
}
@@ -1146,8 +1120,7 @@ rte_vhost_driver_start(const char *path)
"vhost-events", NULL, fdset_event_dispatch,
&vhost_user.fdset);
if (ret != 0) {
- VHOST_LOG_CONFIG(ERR,
- "failed to create fdset handling thread");
+ VHOST_LOG_CONFIG(ERR, "(%s) failed to create fdset handling thread", path);
fdset_pipe_uninit(&vhost_user.fdset);
return -1;
--
2.31.1