patches for DPDK stable branches
* [RFC 01/15] examples/vdpa: fix vDPA device remove
       [not found] <20220408075606.33056-1-lizh@nvidia.com>
@ 2022-04-08  7:55 ` Li Zhang
       [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-04-08  7:55 UTC (permalink / raw)
  To: matan, viacheslavo, orika, thomas, Maxime Coquelin, Chenbo Xia,
	Xiaolong Ye, Xiao Wang
  Cc: dev, rasland, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Call rte_dev_remove on vDPA example application exit. Otherwise
rte_dev_remove is never called.

Fixes: edbed86d1cc ("examples/vdpa: introduce a new sample for vDPA")
Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 examples/vdpa/main.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
index bd66deca85..19753f6e09 100644
--- a/examples/vdpa/main.c
+++ b/examples/vdpa/main.c
@@ -593,6 +593,10 @@ main(int argc, char *argv[])
 		vdpa_sample_quit();
 	}
 
+	RTE_DEV_FOREACH(dev, "class=vdpa", &dev_iter) {
+		rte_dev_remove(dev);
+	}
+
 	/* clean up the EAL */
 	rte_eal_cleanup();
 
-- 
2.27.0
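
For context, a minimal standalone sketch of the loop added above. The
declarations and the wrapper name remove_vdpa_devices() are assumptions
for illustration; the hunk itself relies on dev and dev_iter already
being defined in the sample's main(). Note that a later patch in this
thread adds an RTE_DEV_FOREACH_SAFE variant for removing devices while
iterating.

#include <stdio.h>

#include <rte_dev.h>

/* Detach every device registered under the vdpa class before the
 * application exits. The patch ignores the return value of
 * rte_dev_remove(); the logging below is an illustration only.
 */
static void
remove_vdpa_devices(void)
{
	struct rte_dev_iterator dev_iter;
	struct rte_device *dev;

	RTE_DEV_FOREACH(dev, "class=vdpa", &dev_iter) {
		if (rte_dev_remove(dev) < 0)
			printf("Failed to remove device %s\n", dev->name);
	}
}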



* [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs
       [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
@ 2022-06-06 11:20   ` Li Zhang
  2022-06-06 11:20   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:20 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin
  Cc: dev, thomas, rasland, roniba, stable

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1
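
To make the counting convention concrete: the device capability
max_num_virtio_queues counts individual virtqs, while vhost's
get_queue_num callback reports queue pairs, and each pair consumes two
virtqs (one RX, one TX). A small self-contained illustration (not
driver code; the helper name is hypothetical):

#include <assert.h>
#include <stdint.h>

/* One queue pair = two virtqs, hence the division by two that this
 * patch moves into mlx5_vdpa_get_queue_num(). */
static uint32_t
queue_pairs_from_virtqs(uint32_t max_num_virtio_queues)
{
	return max_num_virtio_queues / 2;
}

int
main(void)
{
	/* A device reporting 16 virtqs supports 8 queue pairs, and a
	 * vring index is valid only below 16 (the virtq count). */
	assert(queue_pairs_from_virtqs(16) == 8);
	return 0;
}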



* [PATCH v1 02/17] eal: add device removal in rte cleanup
       [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
  2022-06-06 11:20   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
@ 2022-06-06 11:20   ` Li Zhang
  2022-06-06 11:20   ` [PATCH 02/16] examples/vdpa: fix vDPA device remove Li Zhang
  2022-06-06 11:20   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
  3 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:20 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Bruce Richardson,
	Dmitry Kozlyuk, Narcisa Ana Maria Vasile, Dmitry Malloy,
	Pallavi Kadam
  Cc: dev, thomas, rasland, roniba, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Add device removal to the rte_eal_cleanup function. This is the last
chance for device removal to be invoked, as a sanity measure. Loop over
the vdev bus first, and then over all buses for all remaining devices,
calling rte_dev_remove.

Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 lib/eal/freebsd/eal.c     | 33 +++++++++++++++++++++++++++++++++
 lib/eal/include/rte_dev.h |  6 ++++++
 lib/eal/linux/eal.c       | 33 +++++++++++++++++++++++++++++++++
 lib/eal/windows/eal.c     | 33 +++++++++++++++++++++++++++++++++
 4 files changed, 105 insertions(+)

diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index a6b20960f2..5ffd9146b6 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -886,11 +886,44 @@ rte_eal_init(int argc, char **argv)
 	return fctret;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
+	remove_all_device();
 	rte_service_finalize();
 	rte_mp_channel_cleanup();
 	/* after this point, any DPDK pointers will become dangling */
diff --git a/lib/eal/include/rte_dev.h b/lib/eal/include/rte_dev.h
index e6ff1218f9..382d548ea3 100644
--- a/lib/eal/include/rte_dev.h
+++ b/lib/eal/include/rte_dev.h
@@ -492,6 +492,12 @@ int
 rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
 		  size_t len);
 
+#define RTE_DEV_FOREACH_SAFE(dev, devstr, it, tdev) \
+	for (rte_dev_iterator_init(it, devstr), \
+		(dev) = rte_dev_iterator_next(it); \
+		(dev) && ((tdev) = rte_dev_iterator_next(it), 1); \
+		(dev) = (tdev))
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 1ef263434a..30b295916e 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -1248,6 +1248,38 @@ mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 	return 0;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
@@ -1257,6 +1289,7 @@ rte_eal_cleanup(void)
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	remove_all_device();
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
 			internal_conf->hugepage_file.unlink_existing)
 		rte_memseg_walk(mark_freeable, NULL);
diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c
index 122de2a319..3d7d411293 100644
--- a/lib/eal/windows/eal.c
+++ b/lib/eal/windows/eal.c
@@ -254,12 +254,45 @@ __rte_trace_point_register(rte_trace_point_t *trace, const char *name,
 	return -ENOTSUP;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	remove_all_device();
 	eal_intr_thread_cancel();
 	eal_mem_virt2iova_cleanup();
 	/* after this point, any DPDK pointers will become dangling */
-- 
2.31.1
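
The reason for the new RTE_DEV_FOREACH_SAFE macro: rte_dev_remove() may
free the device the iterator currently points at, so a plain
RTE_DEV_FOREACH could not advance from it afterwards. The macro caches
the next device (tdev) before the loop body runs, then steps to the
cached pointer. A sketch of the pattern, assuming the macro from this
patch is available; the helper name remove_devices_on_bus() is
hypothetical:

#include <stdio.h>

#include <rte_dev.h>

static void
remove_devices_on_bus(const char *bus_name)
{
	struct rte_dev_iterator it = {0};
	struct rte_device *dev;
	struct rte_device *tdev;
	char devstr[128];

	snprintf(devstr, sizeof(devstr), "bus=%s", bus_name);
	RTE_DEV_FOREACH_SAFE(dev, devstr, &it, tdev) {
		/* dev may be freed by this call; tdev already holds
		 * the next device, so iteration stays valid. */
		(void)rte_dev_remove(dev);
	}
}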



* [PATCH 02/16] examples/vdpa: fix vDPA device remove
       [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
  2022-06-06 11:20   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
  2022-06-06 11:20   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
@ 2022-06-06 11:20   ` Li Zhang
  2022-06-06 11:20   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
  3 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:20 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin, Chenbo Xia,
	Xiaolong Ye, Xiao Wang
  Cc: dev, thomas, rasland, roniba, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Call rte_dev_remove on vDPA example application exit. Otherwise
rte_dev_remove is never called.

Fixes: edbed86d1cc ("examples/vdpa: introduce a new sample for vDPA")
Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 examples/vdpa/main.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
index 7e11ef4e26..534f1e9715 100644
--- a/examples/vdpa/main.c
+++ b/examples/vdpa/main.c
@@ -632,6 +632,10 @@ main(int argc, char *argv[])
 		vdpa_sample_quit();
 	}
 
+	RTE_DEV_FOREACH(dev, "class=vdpa", &dev_iter) {
+		rte_dev_remove(dev);
+	}
+
 	/* clean up the EAL */
 	rte_eal_cleanup();
 
-- 
2.31.1



* [PATCH v1 03/17] examples/vdpa: fix devices cleanup
       [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
                     ` (2 preceding siblings ...)
  2022-06-06 11:20   ` [PATCH 02/16] examples/vdpa: fix vDPA device remove Li Zhang
@ 2022-06-06 11:20   ` Li Zhang
  3 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:20 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin, Chenbo Xia,
	Chengchang Tang
  Cc: dev, thomas, rasland, roniba, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Move rte_eal_cleanup into the vdpa_sample_quit function, which
handles all quit paths of the example application.
Otherwise rte_eal_cleanup is not called when the application
exits on a signal such as SIGINT (Ctrl+C).

Fixes: 10aa3757 ("examples: add eal cleanup to examples")
Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 examples/vdpa/main.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
index 7e11ef4e26..62e32b633d 100644
--- a/examples/vdpa/main.c
+++ b/examples/vdpa/main.c
@@ -286,6 +286,8 @@ vdpa_sample_quit(void)
 		if (vports[i].ifname[0] != '\0')
 			close_vdpa(&vports[i]);
 	}
+	/* clean up the EAL */
+	rte_eal_cleanup();
 }
 
 static void
@@ -632,8 +634,5 @@ main(int argc, char *argv[])
 		vdpa_sample_quit();
 	}
 
-	/* clean up the EAL */
-	rte_eal_cleanup();
-
 	return 0;
 }
-- 
2.31.1
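
A hypothetical reduction of the quit path after this patch, showing why
moving the call fixes the signal case: the handler funnels into
vdpa_sample_quit(), so rte_eal_cleanup() now runs on Ctrl+C as well as
on a normal return from main(). Function names follow the sample;
bodies are trimmed to the calls relevant here.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>

static void
vdpa_sample_quit(void)
{
	/* ... close any vDPA ports that are still open ... */

	/* clean up the EAL (moved here by this patch) */
	rte_eal_cleanup();
}

static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
		vdpa_sample_quit();
		exit(0);
	}
}

/* Registered early in main():
 *	signal(SIGINT, signal_handler);
 *	signal(SIGTERM, signal_handler);
 */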



* [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs
       [not found] ` <20220606114650.209612-1-lizh@nvidia.com>
@ 2022-06-06 11:46   ` Li Zhang
  2022-06-06 11:46   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
  2022-06-06 11:46   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
  2 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:46 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin
  Cc: dev, thomas, rasland, roniba, stable

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1



* [PATCH v1 02/17] eal: add device removal in rte cleanup
       [not found] ` <20220606114650.209612-1-lizh@nvidia.com>
  2022-06-06 11:46   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
@ 2022-06-06 11:46   ` Li Zhang
  2022-06-06 11:46   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
  2 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:46 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Bruce Richardson,
	Dmitry Kozlyuk, Narcisa Ana Maria Vasile, Dmitry Malloy,
	Pallavi Kadam
  Cc: dev, thomas, rasland, roniba, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Add device removal to the rte_eal_cleanup function. This is the last
chance for device removal to be invoked, as a sanity measure. Loop over
the vdev bus first, and then over all buses for all remaining devices,
calling rte_dev_remove.

Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 lib/eal/freebsd/eal.c     | 33 +++++++++++++++++++++++++++++++++
 lib/eal/include/rte_dev.h |  6 ++++++
 lib/eal/linux/eal.c       | 33 +++++++++++++++++++++++++++++++++
 lib/eal/windows/eal.c     | 33 +++++++++++++++++++++++++++++++++
 4 files changed, 105 insertions(+)

diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index a6b20960f2..5ffd9146b6 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -886,11 +886,44 @@ rte_eal_init(int argc, char **argv)
 	return fctret;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
+	remove_all_device();
 	rte_service_finalize();
 	rte_mp_channel_cleanup();
 	/* after this point, any DPDK pointers will become dangling */
diff --git a/lib/eal/include/rte_dev.h b/lib/eal/include/rte_dev.h
index e6ff1218f9..382d548ea3 100644
--- a/lib/eal/include/rte_dev.h
+++ b/lib/eal/include/rte_dev.h
@@ -492,6 +492,12 @@ int
 rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
 		  size_t len);
 
+#define RTE_DEV_FOREACH_SAFE(dev, devstr, it, tdev) \
+	for (rte_dev_iterator_init(it, devstr), \
+		(dev) = rte_dev_iterator_next(it); \
+		(dev) && ((tdev) = rte_dev_iterator_next(it), 1); \
+		(dev) = (tdev))
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 1ef263434a..30b295916e 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -1248,6 +1248,38 @@ mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 	return 0;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
@@ -1257,6 +1289,7 @@ rte_eal_cleanup(void)
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	remove_all_device();
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
 			internal_conf->hugepage_file.unlink_existing)
 		rte_memseg_walk(mark_freeable, NULL);
diff --git a/lib/eal/windows/eal.c b/lib/eal/windows/eal.c
index 122de2a319..3d7d411293 100644
--- a/lib/eal/windows/eal.c
+++ b/lib/eal/windows/eal.c
@@ -254,12 +254,45 @@ __rte_trace_point_register(rte_trace_point_t *trace, const char *name,
 	return -ENOTSUP;
 }
 
+static int
+bus_match_all(const struct rte_bus *bus, const void *data)
+{
+	RTE_SET_USED(bus);
+	RTE_SET_USED(data);
+	return 0;
+}
+
+static void
+remove_all_device(void)
+{
+	struct rte_bus *start = NULL, *next;
+	struct rte_dev_iterator dev_iter = {0};
+	struct rte_device *dev = NULL;
+	struct rte_device *tdev = NULL;
+	char devstr[128];
+
+	RTE_DEV_FOREACH_SAFE(dev, "bus=vdev", &dev_iter, tdev) {
+		(void)rte_dev_remove(dev);
+	}
+	while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) {
+		start = next;
+		/* Skip buses that don't have iterate method */
+		if (!next->dev_iterate || !next->name)
+			continue;
+		snprintf(devstr, sizeof(devstr), "bus=%s", next->name);
+		RTE_DEV_FOREACH_SAFE(dev, devstr, &dev_iter, tdev) {
+			(void)rte_dev_remove(dev);
+		}
+	};
+}
+
 int
 rte_eal_cleanup(void)
 {
 	struct internal_config *internal_conf =
 		eal_get_internal_configuration();
 
+	remove_all_device();
 	eal_intr_thread_cancel();
 	eal_mem_virt2iova_cleanup();
 	/* after this point, any DPDK pointers will become dangling */
-- 
2.31.1



* [PATCH v1 03/17] examples/vdpa: fix devices cleanup
       [not found] ` <20220606114650.209612-1-lizh@nvidia.com>
  2022-06-06 11:46   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
  2022-06-06 11:46   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
@ 2022-06-06 11:46   ` Li Zhang
  2 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-06 11:46 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin, Chenbo Xia,
	Chengchang Tang
  Cc: dev, thomas, rasland, roniba, Yajun Wu, stable

From: Yajun Wu <yajunw@nvidia.com>

Move rte_eal_cleanup into the vdpa_sample_quit function, which
handles all quit paths of the example application.
Otherwise rte_eal_cleanup is not called when the application
exits on a signal such as SIGINT (Ctrl+C).

Fixes: 10aa3757 ("examples: add eal cleanup to examples")
Cc: stable@dpdk.org

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
---
 examples/vdpa/main.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/examples/vdpa/main.c b/examples/vdpa/main.c
index 7e11ef4e26..62e32b633d 100644
--- a/examples/vdpa/main.c
+++ b/examples/vdpa/main.c
@@ -286,6 +286,8 @@ vdpa_sample_quit(void)
 		if (vports[i].ifname[0] != '\0')
 			close_vdpa(&vports[i]);
 	}
+	/* clean up the EAL */
+	rte_eal_cleanup();
 }
 
 static void
@@ -632,8 +634,5 @@ main(int argc, char *argv[])
 		vdpa_sample_quit();
 	}
 
-	/* clean up the EAL */
-	rte_eal_cleanup();
-
 	return 0;
 }
-- 
2.31.1



* [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs
       [not found] ` <20220616023012.16013-1-lizh@nvidia.com>
@ 2022-06-16  2:29   ` Li Zhang
  2022-06-17 14:27     ` Maxime Coquelin
  0 siblings, 1 reply; 14+ messages in thread
From: Li Zhang @ 2022-06-16  2:29 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin
  Cc: dev, thomas, rasland, roniba, stable

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.30.2



* Re: [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs
  2022-06-16  2:29   ` [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
@ 2022-06-17 14:27     ` Maxime Coquelin
  0 siblings, 0 replies; 14+ messages in thread
From: Maxime Coquelin @ 2022-06-17 14:27 UTC (permalink / raw)
  To: Li Zhang, orika, viacheslavo, matan, shahafs
  Cc: dev, thomas, rasland, roniba, stable



On 6/16/22 04:29, Li Zhang wrote:
> The driver wrongly takes the capability value for
> the number of virtq pairs instead of just the number of virtqs.
> 
> Adjust all the usages of it to be the number of virtqs.
> 
> Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Li Zhang <lizh@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
>   drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
>   drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
>   2 files changed, 9 insertions(+), 9 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime



* [PATCH v3 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs
       [not found] ` <20220618084805.87315-1-lizh@nvidia.com>
@ 2022-06-18  8:47   ` Li Zhang
  0 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-18  8:47 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin
  Cc: dev, thomas, rasland, roniba, stable

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1



* [PATCH v4 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs
       [not found] ` <20220618090258.91157-1-lizh@nvidia.com>
@ 2022-06-18  9:02   ` Li Zhang
  0 siblings, 0 replies; 14+ messages in thread
From: Li Zhang @ 2022-06-18  9:02 UTC (permalink / raw)
  To: orika, viacheslavo, matan, shahafs, Maxime Coquelin
  Cc: dev, thomas, rasland, roniba, stable

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 12 ++++++------
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 76fa5d4299..ee71339b78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -84,7 +84,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -141,7 +141,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -388,7 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -411,7 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 				vdev->device->name);
 		return -E2BIG;
@@ -624,7 +624,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
@@ -685,7 +685,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
 			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index e025be47d2..c258eb3024 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1



* [PATCH 21.11] vdpa/mlx5: fix maximum number of virtqs
       [not found] <20220408075606.33056-1-lizh@nvidia.com>
                   ` (5 preceding siblings ...)
       [not found] ` <20220618090258.91157-1-lizh@nvidia.com>
@ 2022-06-28  3:47 ` Li Zhang
  2022-06-28 14:58   ` Kevin Traynor
  6 siblings, 1 reply; 14+ messages in thread
From: Li Zhang @ 2022-06-28  3:47 UTC (permalink / raw)
  To: orika, matan, maxime.coquelin, Viacheslav Ovsiienko
  Cc: stable, thomas, rasland, roniba

[ upstream commit 6f065d1539bed56602e3c6159c99cccb3bca38e4 ]

The driver wrongly takes the capability value for
the number of virtq pairs instead of just the number of virtqs.

Adjust all the usages of it to be the number of virtqs.

Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa.c       | 6 +++---
 drivers/vdpa/mlx5/mlx5_vdpa_virtq.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 8dfaba791d..9c1c70037c 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -81,7 +81,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
@@ -138,7 +138,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -518,7 +518,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev)
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 0ef7ed0e4a..ea2ec83a1b 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -470,9 +470,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
-- 
2.31.1



* Re: [PATCH 21.11] vdpa/mlx5: fix maximum number of virtqs
  2022-06-28  3:47 ` [PATCH 21.11] vdpa/mlx5: fix maximum " Li Zhang
@ 2022-06-28 14:58   ` Kevin Traynor
  0 siblings, 0 replies; 14+ messages in thread
From: Kevin Traynor @ 2022-06-28 14:58 UTC (permalink / raw)
  To: Li Zhang, orika, matan, maxime.coquelin, Viacheslav Ovsiienko
  Cc: stable, thomas, rasland, roniba

On 28/06/2022 04:47, Li Zhang wrote:
> [ upstream commit 6f065d1539bed56602e3c6159c99cccb3bca38e4 ]
> 
> The driver wrongly takes the capability value for
> the number of virtq pairs instead of just the number of virtqs.
> 
> Adjust all the usages of it to be the number of virtqs.
> 
> Fixes: c2eb33aaf967 ("vdpa/mlx5: manage virtqs by array")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Li Zhang <lizh@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---

Applied, thanks.



Thread overview: 14+ messages
     [not found] <20220408075606.33056-1-lizh@nvidia.com>
2022-04-08  7:55 ` [RFC 01/15] examples/vdpa: fix vDPA device remove Li Zhang
     [not found] ` <20220606112109.208873-1-lizh@nvidia.com>
2022-06-06 11:20   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:20   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:20   ` [PATCH 02/16] examples/vdpa: fix vDPA device remove Li Zhang
2022-06-06 11:20   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
     [not found] ` <20220606114650.209612-1-lizh@nvidia.com>
2022-06-06 11:46   ` [PATCH v1 01/17] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-06 11:46   ` [PATCH v1 02/17] eal: add device removal in rte cleanup Li Zhang
2022-06-06 11:46   ` [PATCH v1 03/17] examples/vdpa: fix devices cleanup Li Zhang
     [not found] ` <20220616023012.16013-1-lizh@nvidia.com>
2022-06-16  2:29   ` [PATCH v2 01/15] vdpa/mlx5: fix usage of capability for max number of virtqs Li Zhang
2022-06-17 14:27     ` Maxime Coquelin
     [not found] ` <20220618084805.87315-1-lizh@nvidia.com>
2022-06-18  8:47   ` [PATCH v3 " Li Zhang
     [not found] ` <20220618090258.91157-1-lizh@nvidia.com>
2022-06-18  9:02   ` [PATCH v4 " Li Zhang
2022-06-28  3:47 ` [PATCH 21.11] vdpa/mlx5: fix maximum " Li Zhang
2022-06-28 14:58   ` Kevin Traynor
