From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
eperezma@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v1 21/21] net/virtio-user: remove max queues limitation
Date: Wed, 30 Nov 2022 16:56:39 +0100
Message-ID: <20221130155639.150553-22-maxime.coquelin@redhat.com>
In-Reply-To: <20221130155639.150553-1-maxime.coquelin@redhat.com>
This patch removes the 8 queue pairs limitation by
dynamically allocating the vring metadata once the
maximum number of queue pairs supported by the backend
is known.

This is especially useful for Vhost-vDPA with physical
devices, where the number of supported queue pairs can
be much higher than 8.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
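Note (not part of the commit): a minimal sketch of the allocation
scheme introduced here, assuming the virtio_user_dev fields added in
this patch. Error unwinding, the eventfd arrays and the per-queue
metadata are elided; see virtio_user_alloc_vrings() in the diff below
for the complete version.

	#include <rte_common.h>	/* RTE_MAX */
	#include <rte_malloc.h>	/* rte_zmalloc(), rte_free() */

	static int
	sketch_alloc_vrings(struct virtio_user_dev *dev)
	{
		/* One Rx and one Tx vring per queue pair, plus one
		 * control vring when the backend exposes hw_cvq. */
		int nr_vrings = dev->max_queue_pairs * 2 + !!dev->hw_cvq;

		/* The vrings union backs either split or packed rings,
		 * so size each element for the larger of the two
		 * layouts. */
		size_t size = RTE_MAX(sizeof(*dev->vrings.split),
				sizeof(*dev->vrings.packed));

		dev->vrings.ptr = rte_zmalloc("virtio_user_dev",
				nr_vrings * size, 0);
		if (dev->vrings.ptr == NULL)
			return -1;

		return 0;
	}

With the limit gone, a virtio-user port on top of vhost-vDPA can, for
example, be created with more than 8 pairs (hypothetical device path):
--vdev 'net_virtio_user0,path=/dev/vhost-vdpa-0,queues=16'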
drivers/net/virtio/virtio.h | 6 -
.../net/virtio/virtio_user/virtio_user_dev.c | 118 ++++++++++++++----
.../net/virtio/virtio_user/virtio_user_dev.h | 16 +--
drivers/net/virtio/virtio_user_ethdev.c | 17 +--
4 files changed, 109 insertions(+), 48 deletions(-)
diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
index 5c8f71a44d..04a897bf51 100644
--- a/drivers/net/virtio/virtio.h
+++ b/drivers/net/virtio/virtio.h
@@ -124,12 +124,6 @@
VIRTIO_NET_HASH_TYPE_UDP_EX)
-/*
- * Maximum number of virtqueues per device.
- */
-#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
-#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
-
/* VirtIO device IDs. */
#define VIRTIO_ID_NETWORK 0x01
#define VIRTIO_ID_BLOCK 0x02
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7c48c9bb29..aa24fdea70 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,7 @@
#include <rte_alarm.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
#include "vhost.h"
#include "virtio_user_dev.h"
@@ -58,8 +59,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
int ret;
struct vhost_vring_file file;
struct vhost_vring_state state;
- struct vring *vring = &dev->vrings[queue_sel];
- struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
+ struct vring *vring = &dev->vrings.split[queue_sel];
+ struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
struct vhost_vring_addr addr = {
.index = queue_sel,
.log_guest_addr = 0,
@@ -299,18 +300,6 @@ virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_
return ret;
}
- if (dev->max_queue_pairs > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
- /*
- * If the device supports control queue, the control queue
- * index is max_virtqueue_pairs * 2. Disable MQ if it happens.
- */
- PMD_DRV_LOG(ERR, "(%s) Device advertises too many queues (%u, max supported %u)",
- dev->path, dev->max_queue_pairs, VIRTIO_MAX_VIRTQUEUE_PAIRS);
- dev->max_queue_pairs = 1;
-
- return -1;
- }
-
return 0;
}
@@ -579,6 +568,86 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
return 0;
}
+static int
+virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+{
+ int i, size, nr_vrings;
+
+ nr_vrings = dev->max_queue_pairs * 2;
+ if (dev->hw_cvq)
+ nr_vrings++;
+
+ dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+ if (!dev->callfds) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
+ return -1;
+ }
+
+ dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
+ if (!dev->kickfds) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
+ goto free_callfds;
+ }
+
+ for (i = 0; i < nr_vrings; i++) {
+ dev->callfds[i] = -1;
+ dev->kickfds[i] = -1;
+ }
+
+ size = RTE_MAX(sizeof(*dev->vrings.split), sizeof(*dev->vrings.packed));
+ dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
+ if (!dev->vrings.ptr) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
+ goto free_kickfds;
+ }
+
+ dev->packed_queues = rte_zmalloc("virtio_user_dev",
+ nr_vrings * sizeof(*dev->packed_queues), 0);
+ if (!dev->packed_queues) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata", dev->path);
+ goto free_vrings;
+ }
+
+ dev->qp_enabled = rte_zmalloc("virtio_user_dev",
+ dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0);
+ if (!dev->qp_enabled) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
+ goto free_packed_queues;
+ }
+
+ return 0;
+
+free_packed_queues:
+ rte_free(dev->packed_queues);
+ dev->packed_queues = NULL;
+free_vrings:
+ rte_free(dev->vrings.ptr);
+ dev->vrings.ptr = NULL;
+free_kickfds:
+ rte_free(dev->kickfds);
+ dev->kickfds = NULL;
+free_callfds:
+ rte_free(dev->callfds);
+ dev->callfds = NULL;
+
+ return -1;
+}
+
+static void
+virtio_user_free_vrings(struct virtio_user_dev *dev)
+{
+ rte_free(dev->qp_enabled);
+ dev->qp_enabled = NULL;
+ rte_free(dev->packed_queues);
+ dev->packed_queues = NULL;
+ rte_free(dev->vrings.ptr);
+ dev->vrings.ptr = NULL;
+ rte_free(dev->kickfds);
+ dev->kickfds = NULL;
+ rte_free(dev->callfds);
+ dev->callfds = NULL;
+}
+
/* Use below macro to filter features from vhost backend */
#define VIRTIO_USER_SUPPORTED_FEATURES \
(1ULL << VIRTIO_NET_F_MAC | \
@@ -607,16 +676,10 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
enum virtio_user_backend_type backend_type)
{
uint64_t backend_features;
- int i;
pthread_mutex_init(&dev->mutex, NULL);
strlcpy(dev->path, path, PATH_MAX);
- for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
- dev->kickfds[i] = -1;
- dev->callfds[i] = -1;
- }
-
dev->started = 0;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
@@ -661,9 +724,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
if (dev->max_queue_pairs > 1)
cq = 1;
+ if (virtio_user_alloc_vrings(dev) < 0) {
+ PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
+ goto destroy;
+ }
+
if (virtio_user_dev_init_notify(dev) < 0) {
PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
- goto destroy;
+ goto free_vrings;
}
if (virtio_user_fill_intr_handle(dev) < 0) {
@@ -722,6 +790,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
notify_uninit:
virtio_user_dev_uninit_notify(dev);
+free_vrings:
+ virtio_user_free_vrings(dev);
destroy:
dev->ops->destroy(dev);
@@ -742,6 +812,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
virtio_user_dev_uninit_notify(dev);
+ virtio_user_free_vrings(dev);
+
free(dev->ifname);
if (dev->is_server)
@@ -897,7 +969,7 @@ static void
virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
- struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+ struct vring_packed *vring = &dev->vrings.packed[queue_idx];
uint16_t n_descs, flags;
/* Perform a load-acquire barrier in desc_is_avail to
@@ -931,7 +1003,7 @@ virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
uint16_t avail_idx, desc_idx;
struct vring_used_elem *uep;
uint32_t n_descs;
- struct vring *vring = &dev->vrings[queue_idx];
+ struct vring *vring = &dev->vrings.split[queue_idx];
/* Consume avail ring, using used ring idx as first one */
while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index e8753f6019..7323d88302 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -29,8 +29,8 @@ struct virtio_user_dev {
enum virtio_user_backend_type backend_type;
bool is_server; /* server or client mode */
- int callfds[VIRTIO_MAX_VIRTQUEUES];
- int kickfds[VIRTIO_MAX_VIRTQUEUES];
+ int *callfds;
+ int *kickfds;
int mac_specified;
uint16_t max_queue_pairs;
uint16_t queue_pairs;
@@ -48,11 +48,13 @@ struct virtio_user_dev {
char *ifname;
union {
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
- struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
- };
- struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
- bool qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
+ void *ptr;
+ struct vring *split;
+ struct vring_packed *packed;
+ } vrings;
+
+ struct virtio_user_queue *packed_queues;
+ bool *qp_enabled;
struct virtio_user_backend_ops *ops;
pthread_mutex_t mutex;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index d23959e836..b1fc4d5d30 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -186,7 +186,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
uint64_t used_addr;
uint16_t i;
- vring = &dev->packed_vrings[queue_idx];
+ vring = &dev->vrings.packed[queue_idx];
desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
avail_addr = desc_addr + vq->vq_nentries *
sizeof(struct vring_packed_desc);
@@ -216,10 +216,10 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
ring[vq->vq_nentries]),
VIRTIO_VRING_ALIGN);
- dev->vrings[queue_idx].num = vq->vq_nentries;
- dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
- dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
- dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+ dev->vrings.split[queue_idx].num = vq->vq_nentries;
+ dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+ dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+ dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
}
static int
@@ -619,13 +619,6 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
}
}
- if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
- PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
- VIRTIO_USER_ARG_QUEUES_NUM, queues,
- VIRTIO_MAX_VIRTQUEUE_PAIRS);
- goto end;
- }
-
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
&get_integer_arg, &mrg_rxbuf) < 0) {
--
2.38.1