From: Nikos Dragazis <ndragazis@arrikto.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	Tiwei Bie <tiwei.bie@intel.com>,
	Zhihong Wang <zhihong.wang@intel.com>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Wei Wang <wei.w.wang@intel.com>,
	Stojaczyk Dariusz <dariusz.stojaczyk@intel.com>,
	Vangelis Koukis <vkoukis@arrikto.com>
Subject: [dpdk-dev] [PATCH 12/28] vhost: move slave request fd and lock
Date: Wed, 19 Jun 2019 18:14:37 +0300
Message-ID: <1560957293-17294-13-git-send-email-ndragazis@arrikto.com>
In-Reply-To: <1560957293-17294-1-git-send-email-ndragazis@arrikto.com>

The slave request file descriptor is specific to the AF_UNIX transport.
Move this field, along with its spinlock, out of struct virtio_net and
into the private struct vhost_user_connection in trans_af_unix.c. This
also requires moving the associated functions send_vhost_slave_message()
and process_slave_message_reply() out of vhost_user.c and into
trans_af_unix.c, as well as moving the spinlock initialization out of
vhost_new_device() and into trans_af_unix.c.

This change will allow future transports to implement the slave request
fd without relying on socket I/O.
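
To illustrate the point (this sketch is not part of the patch, and
every name in it other than the vhost types and container_of() is
invented), a hypothetical non-socket transport could implement the
slave request channel entirely in memory while reusing the same
locking discipline as trans_af_unix.c:

  /* Hypothetical transport-private connection state. The embedded
   * struct virtio_net must remain the first field so container_of()
   * can recover this struct from a plain device pointer. */
  struct hypo_connection {
  	struct virtio_net device;      /* must be the first field! */
  	rte_spinlock_t slave_req_lock; /* serializes request/reply */
  	int (*submit)(struct VhostUserMsg *msg); /* in-memory channel */
  };

  static int
  hypo_send_slave_req(struct virtio_net *dev, struct VhostUserMsg *msg)
  {
  	struct hypo_connection *conn =
  		container_of(dev, struct hypo_connection, device);
  	int ret;

  	/* Hold the lock across request and reply, exactly as
  	 * af_unix_send_slave_req() does below; the matching
  	 * process_slave_message_reply() callback releases it. */
  	if (msg->flags & VHOST_USER_NEED_REPLY)
  		rte_spinlock_lock(&conn->slave_req_lock);

  	ret = conn->submit(msg); /* no write() to a slave_req_fd */
  	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
  		rte_spinlock_unlock(&conn->slave_req_lock);

  	return ret;
  }

Since vhost_user.c now reaches the slave channel only through
dev->trans_ops, none of this transport-private state has to live in
struct virtio_net.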

Signed-off-by: Nikos Dragazis <ndragazis@arrikto.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 lib/librte_vhost/trans_af_unix.c | 87 +++++++++++++++++++++++++++++++++++++++-
 lib/librte_vhost/vhost.c         |  4 +-
 lib/librte_vhost/vhost.h         | 41 +++++++++++++++++--
 lib/librte_vhost/vhost_user.c    | 67 ++++---------------------------
 4 files changed, 132 insertions(+), 67 deletions(-)

diff --git a/lib/librte_vhost/trans_af_unix.c b/lib/librte_vhost/trans_af_unix.c
index c0ba8df..5f9ef5a 100644
--- a/lib/librte_vhost/trans_af_unix.c
+++ b/lib/librte_vhost/trans_af_unix.c
@@ -29,6 +29,8 @@ struct vhost_user_connection {
 	struct virtio_net device; /* must be the first field! */
 	struct vhost_user_socket *vsocket;
 	int connfd;
+	int slave_req_fd;
+	rte_spinlock_t slave_req_lock;
 
 	TAILQ_ENTRY(vhost_user_connection) next;
 };
@@ -41,6 +43,7 @@ struct af_unix_socket {
 	struct sockaddr_un un;
 };
 
+int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
 static int create_unix_socket(struct vhost_user_socket *vsocket);
 static int vhost_user_start_server(struct vhost_user_socket *vsocket);
 static int vhost_user_start_client(struct vhost_user_socket *vsocket);
@@ -161,8 +164,71 @@ af_unix_send_reply(struct virtio_net *dev, struct VhostUserMsg *msg)
 static int
 af_unix_send_slave_req(struct virtio_net *dev, struct VhostUserMsg *msg)
 {
-	return send_fd_message(dev->slave_req_fd, msg,
-			       VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	int ret;
+
+	if (msg->flags & VHOST_USER_NEED_REPLY)
+		rte_spinlock_lock(&conn->slave_req_lock);
+
+	ret = send_fd_message(conn->slave_req_fd, msg,
+			VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
+
+	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
+		rte_spinlock_unlock(&conn->slave_req_lock);
+
+	return ret;
+}
+
+static int
+af_unix_process_slave_message_reply(struct virtio_net *dev,
+				    const struct VhostUserMsg *msg)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	struct VhostUserMsg msg_reply;
+	int ret;
+
+	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
+		return 0;
+
+	if (read_vhost_message(conn->slave_req_fd, &msg_reply) < 0) {
+		ret = -1;
+		goto out;
+	}
+
+	if (msg_reply.request.slave != msg->request.slave) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"Received unexpected msg type (%u), expected %u\n",
+			msg_reply.request.slave, msg->request.slave);
+		ret = -1;
+		goto out;
+	}
+
+	ret = msg_reply.payload.u64 ? -1 : 0;
+
+out:
+	rte_spinlock_unlock(&conn->slave_req_lock);
+	return ret;
+}
+
+static int
+af_unix_set_slave_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+	int fd = msg->fds[0];
+
+	if (fd < 0) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+				"Invalid file descriptor for slave channel (%d)\n",
+				fd);
+		return -1;
+	}
+
+	conn->slave_req_fd = fd;
+
+	return 0;
 }
 
 static void
@@ -185,7 +251,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	conn = container_of(dev, struct vhost_user_connection, device);
 	conn->connfd = fd;
+	conn->slave_req_fd = -1;
 	conn->vsocket = vsocket;
+	rte_spinlock_init(&conn->slave_req_lock);
 
 	size = strnlen(vsocket->path, PATH_MAX);
 	vhost_set_ifname(dev->vid, vsocket->path, size);
@@ -682,6 +750,18 @@ af_unix_socket_start(struct vhost_user_socket *vsocket)
 		return vhost_user_start_client(vsocket);
 }
 
+static void
+af_unix_cleanup_device(struct virtio_net *dev, int destroy __rte_unused)
+{
+	struct vhost_user_connection *conn =
+		container_of(dev, struct vhost_user_connection, device);
+
+	if (conn->slave_req_fd >= 0) {
+		close(conn->slave_req_fd);
+		conn->slave_req_fd = -1;
+	}
+}
+
 static int
 af_unix_vring_call(struct virtio_net *dev __rte_unused,
 		   struct vhost_virtqueue *vq)
@@ -697,7 +777,10 @@ const struct vhost_transport_ops af_unix_trans_ops = {
 	.socket_init = af_unix_socket_init,
 	.socket_cleanup = af_unix_socket_cleanup,
 	.socket_start = af_unix_socket_start,
+	.cleanup_device = af_unix_cleanup_device,
 	.vring_call = af_unix_vring_call,
 	.send_reply = af_unix_send_reply,
 	.send_slave_req = af_unix_send_slave_req,
+	.process_slave_message_reply = af_unix_process_slave_message_reply,
+	.set_slave_req_fd = af_unix_set_slave_req_fd,
 };
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 0fdc54f..5b16390 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -256,6 +256,8 @@ cleanup_device(struct virtio_net *dev, int destroy)
 
 	for (i = 0; i < dev->nr_vring; i++)
 		cleanup_vq(dev->virtqueue[i], destroy);
+
+	dev->trans_ops->cleanup_device(dev, destroy);
 }
 
 void
@@ -508,11 +510,9 @@ vhost_new_device(const struct vhost_transport_ops *trans_ops)
 	vhost_devices[i] = dev;
 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
-	dev->slave_req_fd = -1;
 	dev->trans_ops = trans_ops;
 	dev->vdpa_dev_id = -1;
 	dev->postcopy_ufd = -1;
-	rte_spinlock_init(&dev->slave_req_lock);
 
 	return dev;
 }
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b20773c..2213fbe 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -340,6 +340,16 @@ struct vhost_transport_ops {
 	int (*socket_start)(struct vhost_user_socket *vsocket);
 
 	/**
+	 * Free resources associated with this device.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param destroy
+	 *  0 on device reset, 1 on full cleanup.
+	 */
+	void (*cleanup_device)(struct virtio_net *dev, int destroy);
+
+	/**
 	 * Notify the guest that used descriptors have been added to the vring.
 	 * The VRING_AVAIL_F_NO_INTERRUPT flag and event idx have already been checked
 	 * so this function just needs to perform the notification.
@@ -377,6 +387,34 @@ struct vhost_transport_ops {
 	 */
 	int (*send_slave_req)(struct virtio_net *dev,
 			      struct VhostUserMsg *req);
+
+	/**
+	 * Process the master's reply to a slave request.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param msg
+	 *  slave request message
+	 * @return
+	 *  0 on success, -1 on failure
+	 */
+	int (*process_slave_message_reply)(struct virtio_net *dev,
+					   const struct VhostUserMsg *msg);
+
+	/**
+	 * Process the VHOST_USER_SET_SLAVE_REQ_FD message.  After this
+	 * function succeeds, send_slave_req() may be called to submit
+	 * requests to the master.
+	 *
+	 * @param dev
+	 *  vhost device
+	 * @param msg
+	 *  message
+	 * @return
+	 *  0 on success, -1 on failure
+	 */
+	int (*set_slave_req_fd)(struct virtio_net *dev,
+				struct VhostUserMsg *msg);
 };
 
 /** The traditional AF_UNIX vhost-user protocol transport. */
@@ -419,9 +457,6 @@ struct virtio_net {
 	uint32_t		max_guest_pages;
 	struct guest_page       *guest_pages;
 
-	int			slave_req_fd;
-	rte_spinlock_t		slave_req_lock;
-
 	int			postcopy_ufd;
 	int			postcopy_listening;
 
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 5c12435..a4dcba1 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -160,11 +160,6 @@ vhost_backend_cleanup(struct virtio_net *dev)
 		dev->log_addr = 0;
 	}
 
-	if (dev->slave_req_fd >= 0) {
-		close(dev->slave_req_fd);
-		dev->slave_req_fd = -1;
-	}
-
 	if (dev->postcopy_ufd >= 0) {
 		close(dev->postcopy_ufd);
 		dev->postcopy_ufd = -1;
@@ -1549,17 +1544,13 @@ static int
 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
 {
+	int ret;
 	struct virtio_net *dev = *pdev;
-	int fd = msg->fds[0];
 
-	if (fd < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-				"Invalid file descriptor for slave channel (%d)\n",
-				fd);
-		return RTE_VHOST_MSG_RESULT_ERR;
-	}
+	ret = dev->trans_ops->set_slave_req_fd(dev, msg);
 
-	dev->slave_req_fd = fd;
+	if (ret < 0)
+		return RTE_VHOST_MSG_RESULT_ERR;
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -1778,21 +1769,6 @@ send_vhost_reply(struct virtio_net *dev, struct VhostUserMsg *msg)
 	return dev->trans_ops->send_reply(dev, msg);
 }
 
-static int
-send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
-{
-	int ret;
-
-	if (msg->flags & VHOST_USER_NEED_REPLY)
-		rte_spinlock_lock(&dev->slave_req_lock);
-
-	ret = dev->trans_ops->send_slave_req(dev, msg);
-	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
-		rte_spinlock_unlock(&dev->slave_req_lock);
-
-	return ret;
-}
-
 /*
  * Allocate a queue pair if it hasn't been allocated yet
  */
@@ -2069,35 +2045,6 @@ vhost_user_msg_handler(int vid, int fd, const struct VhostUserMsg *msg_)
 	return 0;
 }
 
-static int process_slave_message_reply(struct virtio_net *dev,
-				       const struct VhostUserMsg *msg)
-{
-	struct VhostUserMsg msg_reply;
-	int ret;
-
-	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
-		return 0;
-
-	if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
-		ret = -1;
-		goto out;
-	}
-
-	if (msg_reply.request.slave != msg->request.slave) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"Received unexpected msg type (%u), expected %u\n",
-			msg_reply.request.slave, msg->request.slave);
-		ret = -1;
-		goto out;
-	}
-
-	ret = msg_reply.payload.u64 ? -1 : 0;
-
-out:
-	rte_spinlock_unlock(&dev->slave_req_lock);
-	return ret;
-}
-
 int
 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 {
@@ -2113,7 +2060,7 @@ vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 		},
 	};
 
-	ret = send_vhost_slave_req(dev, &msg);
+	ret = dev->trans_ops->send_slave_req(dev, &msg);
 	if (ret < 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 				"Failed to send IOTLB miss message (%d)\n",
@@ -2148,14 +2095,14 @@ static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
 		msg.fd_num = 1;
 	}
 
-	ret = send_vhost_slave_message(dev, &msg);
+	ret = dev->trans_ops->send_slave_req(dev, &msg);
 	if (ret < 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"Failed to set host notifier (%d)\n", ret);
 		return ret;
 	}
 
-	return process_slave_message_reply(dev, &msg);
+	return dev->trans_ops->process_slave_message_reply(dev, &msg);
 }
 
 int rte_vhost_host_notifier_ctrl(int vid, bool enable)
-- 
2.7.4


Thread overview: 40+ messages
2019-06-19 15:14 [dpdk-dev] [PATCH 00/28] vhost: add virtio-vhost-user transport Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 01/28] vhost: introduce vhost transport operations structure Nikos Dragazis
2019-06-19 20:14   ` Aaron Conole
2019-06-20 10:30     ` Bruce Richardson
2019-06-20 18:24       ` Nikos Dragazis
2019-06-20 18:19     ` Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 02/28] vhost: move socket management code Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 03/28] vhost: allocate per-socket transport state Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 04/28] vhost: move socket fd and un sockaddr Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 05/28] vhost: move start server/client calls Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 06/28] vhost: move vhost-user connection Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 07/28] vhost: move vhost-user reconnection Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 08/28] vhost: move vhost-user fdset Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 09/28] vhost: propagate vhost transport operations Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 10/28] vhost: use a single structure for the device state Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 11/28] vhost: extract socket I/O into transport Nikos Dragazis
2019-06-19 15:14 ` Nikos Dragazis [this message]
2019-06-19 15:14 ` [dpdk-dev] [PATCH 13/28] vhost: move mmap/munmap Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 14/28] vhost: move setup of the log memory region Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 15/28] vhost: remove main fd parameter from msg handlers Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 16/28] vhost: move postcopy live migration code Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 17/28] vhost: support registering additional vhost-user transports Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 18/28] drivers/virtio_vhost_user: add virtio PCI framework Nikos Dragazis
2019-09-05 16:34   ` Maxime Coquelin
2019-09-09  8:42     ` Nikos Dragazis
2019-09-09  8:44       ` Maxime Coquelin
2019-06-19 15:14 ` [dpdk-dev] [PATCH 19/28] vhost: add index field in vhost virtqueues Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 20/28] drivers: add virtio-vhost-user transport Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 21/28] drivers/virtio_vhost_user: use additional device resources Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 22/28] vhost: add flag for choosing vhost-user transport Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 23/28] net/vhost: add virtio-vhost-user support Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 24/28] examples/vhost_scsi: add --socket-file argument Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 25/28] examples/vhost_scsi: add virtio-vhost-user support Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 26/28] mk: link apps with virtio-vhost-user driver Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 27/28] config: add option for the virtio-vhost-user transport Nikos Dragazis
2019-06-19 15:14 ` [dpdk-dev] [PATCH 28/28] usertools: add virtio-vhost-user devices to dpdk-devbind.py Nikos Dragazis
     [not found] ` <CGME20190620113240eucas1p22ca4faa64a36bbb7aec38a81298ade56@eucas1p2.samsung.com>
2019-06-20 11:32   ` [dpdk-dev] [PATCH 00/28] vhost: add virtio-vhost-user transport Ilya Maximets
2019-06-20 23:44     ` Nikos Dragazis
2019-06-20 11:35 ` Maxime Coquelin
2019-06-22 20:26   ` Nikos Dragazis
