DPDK patches and discussions
* [dpdk-dev] [PATCH 21.02 v1] net/virtio: fix memory init with vDPA backend
@ 2020-11-24 12:45 Maxime Coquelin
  2020-11-25  2:18 ` Jason Wang
  0 siblings, 1 reply; 3+ messages in thread
From: Maxime Coquelin @ 2020-11-24 12:45 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, jasowang; +Cc: Maxime Coquelin, stable

This patch fixes an overhead met with the mlx5-vdpa kernel
driver, where the memory tables get updated for every page
in the mapped area. For example, with 2MB hugepages, a
single IOTLB_UPDATE for a 1GB region causes 512 memory
updates on the mlx5-vdpa side.

Using batching mode, the mlx5 driver will only trigger a
single memory update for all the IOTLB updates that happen
between the batch begin and batch end commands.

Fixes: 6b901437056e ("net/virtio: introduce vhost-vDPA backend")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_user/vhost_vdpa.c | 90 +++++++++++++++++++--
 1 file changed, 85 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
index c7b9349fc8..6d0200516d 100644
--- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
+++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
@@ -66,6 +66,8 @@ struct vhost_iotlb_msg {
 #define VHOST_IOTLB_UPDATE         2
 #define VHOST_IOTLB_INVALIDATE     3
 #define VHOST_IOTLB_ACCESS_FAIL    4
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
 	uint8_t type;
 };
 
@@ -80,6 +82,40 @@ struct vhost_msg {
 	};
 };
 
+static int
+vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
 				  uint64_t iova, size_t len)
@@ -122,6 +158,39 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
 	return 0;
 }
 
+static int
+vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_map(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
+
+static int
+vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
 
 static int
 vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
@@ -159,21 +228,32 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 static int
 vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
 {
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
 	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
 
 	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
 		/* with IOVA as VA mode, we can get away with mapping contiguous
 		 * chunks rather than going page-by-page.
 		 */
-		int ret = rte_memseg_contig_walk_thread_unsafe(
+		ret = rte_memseg_contig_walk_thread_unsafe(
 				vhost_vdpa_map_contig, dev);
 		if (ret)
-			return ret;
+			goto batch_end;
 		/* we have to continue the walk because we've skipped the
 		 * external segments during the config walk.
 		 */
 	}
-	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+
+batch_end:
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
 }
 
 /* with below features, vhost vdpa does not need to do the checksum and TSO,
@@ -293,6 +373,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
 	.setup = vhost_vdpa_setup,
 	.send_request = vhost_vdpa_ioctl,
 	.enable_qp = vhost_vdpa_enable_queue_pair,
-	.dma_map = vhost_vdpa_dma_map,
-	.dma_unmap = vhost_vdpa_dma_unmap,
+	.dma_map = vhost_vdpa_dma_map_batch,
+	.dma_unmap = vhost_vdpa_dma_unmap_batch,
 };
-- 
2.26.2



* Re: [dpdk-dev] [PATCH 21.02 v1] net/virtio: fix memory init with vDPA backend
  2020-11-24 12:45 [dpdk-dev] [PATCH 21.02 v1] net/virtio: fix memory init with vDPA backend Maxime Coquelin
@ 2020-11-25  2:18 ` Jason Wang
  0 siblings, 0 replies; 3+ messages in thread
From: Jason Wang @ 2020-11-25  2:18 UTC (permalink / raw)
  To: Maxime Coquelin, dev, chenbo.xia, amorenoz; +Cc: stable


On 2020/11/24 8:45 PM, Maxime Coquelin wrote:
> This patch fixes an overhead met with the mlx5-vdpa kernel
> driver, where the memory tables get updated for every page
> in the mapped area. For example, with 2MB hugepages, a
> single IOTLB_UPDATE for a 1GB region causes 512 memory
> updates on the mlx5-vdpa side.
>
> Using batching mode, the mlx5 driver will only trigger a
> single memory update for all the IOTLB updates that happen
> between the batch begin and batch end commands.
>
> Fixes: 6b901437056e ("net/virtio: introduce vhost-vDPA backend")
> Cc: stable@dpdk.org
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>   drivers/net/virtio/virtio_user/vhost_vdpa.c | 90 +++++++++++++++++++--
>   1 file changed, 85 insertions(+), 5 deletions(-)


Hi Maxime:

To be safe, it's better to check whether or not the kernel supports
the batching flag.

This could be done by using VHOST_GET_BACKEND_FEATURES to test whether
VHOST_BACKEND_F_IOTLB_BATCH is advertised.
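
A minimal sketch of what such a check could look like (the
VHOST_GET_BACKEND_FEATURES ioctl and the VHOST_BACKEND_F_IOTLB_* bits
come from the Linux vhost UAPI headers; the helper name below is only
for illustration and is not part of this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Redefined in case the local UAPI headers are too old to provide them. */
#ifndef VHOST_BACKEND_F_IOTLB_MSG_V2
#define VHOST_BACKEND_F_IOTLB_MSG_V2	(1ULL << 0)
#endif
#ifndef VHOST_BACKEND_F_IOTLB_BATCH
#define VHOST_BACKEND_F_IOTLB_BATCH	(1ULL << 1)
#endif

static int
vhost_vdpa_iotlb_batch_supported(int vhostfd)
{
	uint64_t features = 0;

	/* Ask the kernel which vDPA backend features it supports. */
	if (ioctl(vhostfd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return 0;

	/* Only send BATCH_BEGIN/BATCH_END when the flag is advertised. */
	return !!(features & VHOST_BACKEND_F_IOTLB_BATCH);
}

The batched map/unmap helpers could then fall back to the plain
vhost_vdpa_dma_map()/vhost_vdpa_dma_unmap() path when this returns 0,
and the negotiated bits would presumably also need to be acked back
with VHOST_SET_BACKEND_FEATURES before batching is used.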

Thanks


>
> diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> index c7b9349fc8..6d0200516d 100644
> --- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
> +++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
> @@ -66,6 +66,8 @@ struct vhost_iotlb_msg {
>   #define VHOST_IOTLB_UPDATE         2
>   #define VHOST_IOTLB_INVALIDATE     3
>   #define VHOST_IOTLB_ACCESS_FAIL    4
> +#define VHOST_IOTLB_BATCH_BEGIN    5
> +#define VHOST_IOTLB_BATCH_END      6
>   	uint8_t type;
>   };
>   
> @@ -80,6 +82,40 @@ struct vhost_msg {
>   	};
>   };
>   
> +static int
> +vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
> +{
> +	struct vhost_msg msg = {};
> +
> +	msg.type = VHOST_IOTLB_MSG_V2;
> +	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
> +
> +	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
> +		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
> +				strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
> +{
> +	struct vhost_msg msg = {};
> +
> +	msg.type = VHOST_IOTLB_MSG_V2;
> +	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
> +
> +	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
> +		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
> +				strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
>   static int
>   vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
>   				  uint64_t iova, size_t len)
> @@ -122,6 +158,39 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
>   	return 0;
>   }
>   
> +static int
> +vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
> +				  uint64_t iova, size_t len)
> +{
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
> +	ret = vhost_vdpa_dma_map(dev, addr, iova, len);
> +
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
> +}
> +
> +static int
> +vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
> +				  uint64_t iova, size_t len)
> +{
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
> +	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);
> +
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
> +}
>   
>   static int
>   vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
> @@ -159,21 +228,32 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
>   static int
>   vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
>   {
> +	int ret;
> +
> +	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
> +		return -1;
> +
>   	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
>   
>   	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
>   		/* with IOVA as VA mode, we can get away with mapping contiguous
>   		 * chunks rather than going page-by-page.
>   		 */
> -		int ret = rte_memseg_contig_walk_thread_unsafe(
> +		ret = rte_memseg_contig_walk_thread_unsafe(
>   				vhost_vdpa_map_contig, dev);
>   		if (ret)
> -			return ret;
> +			goto batch_end;
>   		/* we have to continue the walk because we've skipped the
>   		 * external segments during the config walk.
>   		 */
>   	}
> -	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
> +	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
> +
> +batch_end:
> +	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
> +		return -1;
> +
> +	return ret;
>   }
>   
>   /* with below features, vhost vdpa does not need to do the checksum and TSO,
> @@ -293,6 +373,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
>   	.setup = vhost_vdpa_setup,
>   	.send_request = vhost_vdpa_ioctl,
>   	.enable_qp = vhost_vdpa_enable_queue_pair,
> -	.dma_map = vhost_vdpa_dma_map,
> -	.dma_unmap = vhost_vdpa_dma_unmap,
> +	.dma_map = vhost_vdpa_dma_map_batch,
> +	.dma_unmap = vhost_vdpa_dma_unmap_batch,
>   };



