From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
mkp@redhat.com, fbl@redhat.com, jasowang@redhat.com,
cunming.liang@intel.com, xieyongji@bytedance.com,
echaudro@redhat.com, eperezma@redhat.com, amorenoz@redhat.com,
lulu@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v2 27/28] vhost: add multiqueue support to VDUSE
Date: Thu, 25 May 2023 18:15:49 +0200
Message-ID: <20230525161551.70002-28-maxime.coquelin@redhat.com>
In-Reply-To: <20230525161551.70002-1-maxime.coquelin@redhat.com>

This patch enables control virtqueue support in the VDUSE
backend in order to support multiqueue.
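
When more than one queue pair is requested, the backend now
advertises VIRTIO_NET_F_CTRL_VQ and VIRTIO_NET_F_MQ, exposes
max_virtqueue_pairs through the virtio-net config space, and installs
a kickfd handler that dispatches control requests to
virtio_net_ctrl_handle(). For example, with max_queue_pairs = 4,
total_queues = 4 * 2 + 1 = 9: virtqueues 0..7 are the Rx/Tx pairs and
virtqueue 8 (index max_queue_pairs * 2) is the control queue.

For context, here is a minimal sketch of the request a guest driver
places on the control queue to enable N queue pairs. The constants
are from the VirtIO spec; the struct layouts are illustrative, not
the lib/vhost definitions:

	#include <stdint.h>

	/* Control command class/command for multiqueue (VirtIO spec) */
	#define VIRTIO_NET_CTRL_MQ              4
	#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0

	/*
	 * A control request is a descriptor chain carrying a header,
	 * a command-specific payload and a status byte that the
	 * device writes back.
	 */
	struct virtio_net_ctrl_hdr {
		uint8_t class;	/* VIRTIO_NET_CTRL_MQ */
		uint8_t cmd;	/* VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET */
	} __attribute__((packed));

	struct virtio_net_ctrl_mq {
		uint16_t virtqueue_pairs;	/* requested queue pairs */
	} __attribute__((packed));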
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
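
A note for testers: below is a minimal usage sketch. It assumes
rte_vhost_driver_set_max_queue_num() as added in patch 15 of this
series; the VDUSE path and the queue pair count are placeholders:

	#include <rte_vhost.h>

	static int setup_vduse_port(void)
	{
		const char *path = "/dev/vduse/net0";	/* placeholder */

		if (rte_vhost_driver_register(path, 0) < 0)
			return -1;
		/*
		 * Request 4 queue pairs; vduse_device_create() reads
		 * this back with rte_vhost_driver_get_queue_num().
		 */
		if (rte_vhost_driver_set_max_queue_num(path, 4) < 0)
			return -1;
		return rte_vhost_driver_start(path);
	}

With more than one queue pair, the device is created with
2 * max_queue_pairs + 1 virtqueues; with a single pair, the control
queue and the CTRL_VQ/MQ feature bits are stripped.
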
lib/vhost/vduse.c | 83 +++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 76 insertions(+), 7 deletions(-)
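
Also for reference, the virtio-net config space the device now
exposes, abridged from linux/virtio_net.h (this patch only fills in
max_virtqueue_pairs; the remaining fields stay zeroed):

	struct virtio_net_config {
		uint8_t  mac[6];
		uint16_t status;
		/* Valid only when VIRTIO_NET_F_MQ is offered */
		uint16_t max_virtqueue_pairs;
		uint16_t mtu;
		/* ... speed, duplex and later fields omitted ... */
	};
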
diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index a10dc24d38..699cfed9e3 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -21,6 +21,7 @@
#include "iotlb.h"
#include "vduse.h"
#include "vhost.h"
+#include "virtio_net_ctrl.h"
#define VHOST_VDUSE_API_VERSION 0
#define VDUSE_CTRL_PATH "/dev/vduse/control"
@@ -41,7 +42,9 @@
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
(1ULL << VIRTIO_F_IN_ORDER) | \
- (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+ (1ULL << VIRTIO_NET_F_MQ))
struct vduse {
struct fdset fdset;
@@ -141,6 +144,25 @@ static struct vhost_backend_ops vduse_backend_ops = {
.inject_irq = vduse_inject_irq,
};
+static void
+vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
+{
+ struct virtio_net *dev = arg;
+ uint64_t buf;
+ int ret;
+
+ ret = read(fd, &buf, sizeof(buf));
+ if (ret < 0) {
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read control queue event: %s\n",
+ strerror(errno));
+ return;
+ }
+
+ VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue kicked\n");
+ if (virtio_net_ctrl_handle(dev))
+ VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle ctrl request\n");
+}
+
static void
vduse_vring_setup(struct virtio_net *dev, unsigned int index)
{
@@ -212,6 +234,22 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
return;
}
+
+ if (vq == dev->cvq) {
+ ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev);
+ if (ret) {
+ VHOST_LOG_CONFIG(dev->ifname, ERR,
+ "Failed to setup kickfd handler for VQ %u: %s\n",
+ index, strerror(errno));
+ vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
+ ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
+ close(vq->kickfd);
+ vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ }
+ fdset_pipe_notify(&vduse.fdset);
+ vhost_enable_guest_notification(dev, vq, 1);
+ VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl queue event handler installed\n");
+ }
}
static void
@@ -258,6 +296,9 @@ vduse_device_start(struct virtio_net *dev)
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (vq == dev->cvq)
+ continue;
+
if (dev->notify_ops->vring_state_changed)
dev->notify_ops->vring_state_changed(dev->vid, i, vq->enabled);
}
@@ -334,9 +375,11 @@ vduse_device_create(const char *path)
{
int control_fd, dev_fd, vid, ret;
pthread_t fdset_tid;
- uint32_t i;
+ uint32_t i, max_queue_pairs, total_queues;
struct virtio_net *dev;
+ struct virtio_net_config vnet_config = { 0 };
uint64_t ver = VHOST_VDUSE_API_VERSION;
+ uint64_t features = VDUSE_NET_SUPPORTED_FEATURES;
struct vduse_dev_config *dev_config = NULL;
const char *name = path + strlen("/dev/vduse/");
@@ -376,22 +419,39 @@ vduse_device_create(const char *path)
goto out_ctrl_close;
}
- dev_config = malloc(offsetof(struct vduse_dev_config, config));
+ dev_config = malloc(offsetof(struct vduse_dev_config, config) +
+ sizeof(vnet_config));
if (!dev_config) {
VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
ret = -1;
goto out_ctrl_close;
}
+ ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
+ if (ret < 0) {
+ VHOST_LOG_CONFIG(name, ERR, "Failed to get max queue pairs\n");
+ goto out_free;
+ }
+
+ VHOST_LOG_CONFIG(path, INFO, "VDUSE max queue pairs: %u\n", max_queue_pairs);
+ total_queues = max_queue_pairs * 2;
+
+ if (max_queue_pairs == 1)
+ features &= ~(RTE_BIT64(VIRTIO_NET_F_CTRL_VQ) | RTE_BIT64(VIRTIO_NET_F_MQ));
+ else
+ total_queues += 1; /* Includes ctrl queue */
+
+ vnet_config.max_virtqueue_pairs = max_queue_pairs;
memset(dev_config, 0, sizeof(struct vduse_dev_config));
strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
dev_config->device_id = VIRTIO_ID_NET;
dev_config->vendor_id = 0;
- dev_config->features = VDUSE_NET_SUPPORTED_FEATURES;
- dev_config->vq_num = 2;
+ dev_config->features = features;
+ dev_config->vq_num = total_queues;
dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
- dev_config->config_size = 0;
+ dev_config->config_size = sizeof(struct virtio_net_config);
+ memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));
ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
if (ret < 0) {
@@ -433,7 +493,7 @@ vduse_device_create(const char *path)
dev->vduse_dev_fd = dev_fd;
vhost_setup_virtio_net(dev->vid, true, true, true, true);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < total_queues; i++) {
struct vduse_vq_config vq_cfg = { 0 };
ret = alloc_vring_queue(dev, i);
@@ -452,6 +512,8 @@ vduse_device_create(const char *path)
}
}
+ dev->cvq = dev->virtqueue[max_queue_pairs * 2];
+
ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
if (ret) {
VHOST_LOG_CONFIG(name, ERR, "Failed to add fd %d to vduse fdset\n",
@@ -498,6 +560,13 @@ vduse_device_destroy(const char *path)
if (vid == RTE_MAX_VHOST_DEVICE)
return -1;
+ if (dev->cvq && dev->cvq->kickfd >= 0) {
+ fdset_del(&vduse.fdset, dev->cvq->kickfd);
+ fdset_pipe_notify(&vduse.fdset);
+ close(dev->cvq->kickfd);
+ dev->cvq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ }
+
fdset_del(&vduse.fdset, dev->vduse_dev_fd);
fdset_pipe_notify(&vduse.fdset);
--
2.40.1