From mboxrd@z Thu Jan  1 00:00:00 1970
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
 eperezma@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v1 21/21] net/virtio-user: remove max queues limitation
Date: Wed, 30 Nov 2022 16:56:39 +0100
Message-Id: <20221130155639.150553-22-maxime.coquelin@redhat.com>
In-Reply-To: <20221130155639.150553-1-maxime.coquelin@redhat.com>
References: <20221130155639.150553-1-maxime.coquelin@redhat.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain; charset="US-ASCII"; x-default=true
List-Id: DPDK patches and discussions <dev.dpdk.org>

This patch removes the limitation of 8 queue pairs by dynamically
allocating the vring metadata once the maximum number of queue pairs
supported by the backend is known.

This is especially useful for Vhost-vDPA with physical devices, where
the number of supported queue pairs may be much higher than 8.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
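A brief usage note: with the cap removed, the number of queue pairs is
bounded only by what the backend reports. For instance (illustrative
value, not taken from this patch), a vhost-vDPA backend exposing 16
queue pairs can now be driven with the existing queues=16 devarg, which
the check removed from virtio_user_pmd_probe() below used to reject.
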
 drivers/net/virtio/virtio.h                   |   6 -
 .../net/virtio/virtio_user/virtio_user_dev.c  | 118 ++++++++++++++----
 .../net/virtio/virtio_user/virtio_user_dev.h  |  16 +--
 drivers/net/virtio/virtio_user_ethdev.c       |  17 +--
 4 files changed, 109 insertions(+), 48 deletions(-)
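
The allocation relies on sizing each vring slot for whichever ring
layout ends up being negotiated. Below is a minimal standalone sketch
of that pattern, mirroring the RTE_MAX() sizing in
virtio_user_alloc_vrings(); the types are stand-ins, not the driver's
real struct vring and struct vring_packed.

/*
 * One allocation backs all vrings; each slot is large enough for
 * either the split or the packed layout, so the buffer can safely be
 * viewed as either array once the layout is negotiated.
 */
#include <stdio.h>
#include <stdlib.h>

struct split_ring  { unsigned int num; void *desc; void *avail; void *used; };
struct packed_ring { unsigned int num; void *desc; void *driver; void *device; };

union rings {
	void               *ptr;    /* for allocation and free */
	struct split_ring  *split;  /* split-layout view */
	struct packed_ring *packed; /* packed-layout view */
};

static int alloc_rings(union rings *r, unsigned int nr_vrings)
{
	size_t slot = sizeof(*r->split) > sizeof(*r->packed) ?
			sizeof(*r->split) : sizeof(*r->packed);

	/* nr_vrings slots, each fitting either layout, zero-initialized */
	r->ptr = calloc(nr_vrings, slot);
	return r->ptr == NULL ? -1 : 0;
}

int main(void)
{
	union rings r;

	/* e.g. 16 queue pairs plus one control vq: 2 * 16 + 1 vrings */
	if (alloc_rings(&r, 2 * 16 + 1) < 0)
		return 1;
	r.split[0].num = 256; /* in bounds under either view */
	printf("vring 0 size: %u\n", r.split[0].num);
	free(r.ptr);
	return 0;
}

Indexing with the smaller element stride inside a buffer sized by the
larger one keeps every access in bounds, which is why a single
rte_zmalloc() of nr_vrings * RTE_MAX(...) serves both layouts in the
patch.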

diff --git a/drivers/net/virtio/virtio.h b/drivers/net/virtio/virtio.h
index 5c8f71a44d..04a897bf51 100644
--- a/drivers/net/virtio/virtio.h
+++ b/drivers/net/virtio/virtio.h
@@ -124,12 +124,6 @@
 	VIRTIO_NET_HASH_TYPE_UDP_EX)
 
 
-/*
- * Maximum number of virtqueues per device.
- */
-#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
-#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
-
 /* VirtIO device IDs. */
 #define VIRTIO_ID_NETWORK  0x01
 #define VIRTIO_ID_BLOCK    0x02
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7c48c9bb29..aa24fdea70 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,7 @@
 #include <rte_alarm.h>
 #include <rte_string_fns.h>
 #include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
 
 #include "vhost.h"
 #include "virtio_user_dev.h"
@@ -58,8 +59,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
 	int ret;
 	struct vhost_vring_file file;
 	struct vhost_vring_state state;
-	struct vring *vring = &dev->vrings[queue_sel];
-	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
+	struct vring *vring = &dev->vrings.split[queue_sel];
+	struct vring_packed *pq_vring = &dev->vrings.packed[queue_sel];
 	struct vhost_vring_addr addr = {
 		.index = queue_sel,
 		.log_guest_addr = 0,
@@ -299,18 +300,6 @@ virtio_user_dev_init_max_queue_pairs(struct virtio_user_dev *dev, uint32_t user_
 		return ret;
 	}
 
-	if (dev->max_queue_pairs > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
-		/*
-		 * If the device supports control queue, the control queue
-		 * index is max_virtqueue_pairs * 2. Disable MQ if it happens.
-		 */
-		PMD_DRV_LOG(ERR, "(%s) Device advertises too many queues (%u, max supported %u)",
-				dev->path, dev->max_queue_pairs, VIRTIO_MAX_VIRTQUEUE_PAIRS);
-		dev->max_queue_pairs = 1;
-
-		return -1;
-	}
-
 	return 0;
 }
 
@@ -579,6 +568,86 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	return 0;
 }
 
+static int
+virtio_user_alloc_vrings(struct virtio_user_dev *dev)
+{
+	int i, size, nr_vrings;
+
+	nr_vrings = dev->max_queue_pairs * 2;
+	if (dev->hw_cvq)
+		nr_vrings++;
+
+	dev->callfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->callfds), 0);
+	if (!dev->callfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc callfds", dev->path);
+		return -1;
+	}
+
+	dev->kickfds = rte_zmalloc("virtio_user_dev", nr_vrings * sizeof(*dev->kickfds), 0);
+	if (!dev->kickfds) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc kickfds", dev->path);
+		goto free_callfds;
+	}
+
+	for (i = 0; i < nr_vrings; i++) {
+		dev->callfds[i] = -1;
+		dev->kickfds[i] = -1;
+	}
+
+	size = RTE_MAX(sizeof(*dev->vrings.split), sizeof(*dev->vrings.packed));
+	dev->vrings.ptr = rte_zmalloc("virtio_user_dev", nr_vrings * size, 0);
+	if (!dev->vrings.ptr) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc vrings metadata", dev->path);
+		goto free_kickfds;
+	}
+
+	dev->packed_queues = rte_zmalloc("virtio_user_dev",
+			nr_vrings * sizeof(*dev->packed_queues), 0);
+	if (!dev->packed_queues) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc packed queues metadata", dev->path);
+		goto free_vrings;
+	}
+
+	dev->qp_enabled = rte_zmalloc("virtio_user_dev",
+			dev->max_queue_pairs * sizeof(*dev->qp_enabled), 0);
+	if (!dev->qp_enabled) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to alloc QP enable states", dev->path);
+		goto free_packed_queues;
+	}
+
+	return 0;
+
+free_packed_queues:
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+free_vrings:
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+free_kickfds:
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+free_callfds:
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+
+	return -1;
+}
+
+static void
+virtio_user_free_vrings(struct virtio_user_dev *dev)
+{
+	rte_free(dev->qp_enabled);
+	dev->qp_enabled = NULL;
+	rte_free(dev->packed_queues);
+	dev->packed_queues = NULL;
+	rte_free(dev->vrings.ptr);
+	dev->vrings.ptr = NULL;
+	rte_free(dev->kickfds);
+	dev->kickfds = NULL;
+	rte_free(dev->callfds);
+	dev->callfds = NULL;
+}
+
 /* Use below macro to filter features from vhost backend */
 #define VIRTIO_USER_SUPPORTED_FEATURES			\
 	(1ULL << VIRTIO_NET_F_MAC		|	\
@@ -607,16 +676,10 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 		     enum virtio_user_backend_type backend_type)
 {
 	uint64_t backend_features;
-	int i;
 
 	pthread_mutex_init(&dev->mutex, NULL);
 	strlcpy(dev->path, path, PATH_MAX);
 
-	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; i++) {
-		dev->kickfds[i] = -1;
-		dev->callfds[i] = -1;
-	}
-
 	dev->started = 0;
 	dev->queue_pairs = 1; /* mq disabled by default */
 	dev->queue_size = queue_size;
@@ -661,9 +724,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 	if (dev->max_queue_pairs > 1)
 		cq = 1;
 
+	if (virtio_user_alloc_vrings(dev) < 0) {
+		PMD_INIT_LOG(ERR, "(%s) Failed to allocate vring metadata", dev->path);
+		goto destroy;
+	}
+
 	if (virtio_user_dev_init_notify(dev) < 0) {
 		PMD_INIT_LOG(ERR, "(%s) Failed to init notifiers", dev->path);
-		goto destroy;
+		goto free_vrings;
 	}
 
 	if (virtio_user_fill_intr_handle(dev) < 0) {
@@ -722,6 +790,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, uint16_t queues,
 
 notify_uninit:
 	virtio_user_dev_uninit_notify(dev);
+free_vrings:
+	virtio_user_free_vrings(dev);
 destroy:
 	dev->ops->destroy(dev);
 
@@ -742,6 +812,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
 
 	virtio_user_dev_uninit_notify(dev);
 
+	virtio_user_free_vrings(dev);
+
 	free(dev->ifname);
 
 	if (dev->is_server)
@@ -897,7 +969,7 @@ static void
 virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 {
 	struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
-	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+	struct vring_packed *vring = &dev->vrings.packed[queue_idx];
 	uint16_t n_descs, flags;
 
 	/* Perform a load-acquire barrier in desc_is_avail to
@@ -931,7 +1003,7 @@ virtio_user_handle_cq_split(struct virtio_user_dev *dev, uint16_t queue_idx)
 	uint16_t avail_idx, desc_idx;
 	struct vring_used_elem *uep;
 	uint32_t n_descs;
-	struct vring *vring = &dev->vrings[queue_idx];
+	struct vring *vring = &dev->vrings.split[queue_idx];
 
 	/* Consume avail ring, using used ring idx as first one */
 	while (__atomic_load_n(&vring->used->idx, __ATOMIC_RELAXED)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index e8753f6019..7323d88302 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -29,8 +29,8 @@ struct virtio_user_dev {
 	enum virtio_user_backend_type backend_type;
 	bool		is_server;  /* server or client mode */
 
-	int		callfds[VIRTIO_MAX_VIRTQUEUES];
-	int		kickfds[VIRTIO_MAX_VIRTQUEUES];
+	int		*callfds;
+	int		*kickfds;
 	int		mac_specified;
 	uint16_t	max_queue_pairs;
 	uint16_t	queue_pairs;
@@ -48,11 +48,13 @@ struct virtio_user_dev {
 	char		*ifname;
 
 	union {
-		struct vring		vrings[VIRTIO_MAX_VIRTQUEUES];
-		struct vring_packed	packed_vrings[VIRTIO_MAX_VIRTQUEUES];
-	};
-	struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
-	bool		qp_enabled[VIRTIO_MAX_VIRTQUEUE_PAIRS];
+		void			*ptr;
+		struct vring		*split;
+		struct vring_packed	*packed;
+	} vrings;
+
+	struct virtio_user_queue *packed_queues;
+	bool		*qp_enabled;
 
 	struct virtio_user_backend_ops *ops;
 	pthread_mutex_t	mutex;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index d23959e836..b1fc4d5d30 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -186,7 +186,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
 	uint64_t used_addr;
 	uint16_t i;
 
-	vring  = &dev->packed_vrings[queue_idx];
+	vring  = &dev->vrings.packed[queue_idx];
 	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
 	avail_addr = desc_addr + vq->vq_nentries *
 		sizeof(struct vring_packed_desc);
@@ -216,10 +216,10 @@ virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
 							 ring[vq->vq_nentries]),
 				   VIRTIO_VRING_ALIGN);
 
-	dev->vrings[queue_idx].num = vq->vq_nentries;
-	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
-	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
-	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+	dev->vrings.split[queue_idx].num = vq->vq_nentries;
+	dev->vrings.split[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+	dev->vrings.split[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+	dev->vrings.split[queue_idx].used = (void *)(uintptr_t)used_addr;
 }
 
 static int
@@ -619,13 +619,6 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
 		}
 	}
 
-	if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
-		PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
-			VIRTIO_USER_ARG_QUEUES_NUM, queues,
-			VIRTIO_MAX_VIRTQUEUE_PAIRS);
-		goto end;
-	}
-
 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
 		if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
 				       &get_integer_arg, &mrg_rxbuf) < 0) {
-- 
2.38.1