DPDK patches and discussions
 help / color / mirror / Atom feed
From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	Chenbo Xia <chenbo.xia@intel.com>
Subject: [PATCH] vhost: reduce memory footprint when IOMMU is disabled
Date: Fri, 16 Sep 2022 11:02:02 +0200	[thread overview]
Message-ID: <20220916090202.1190834-1-david.marchand@redhat.com> (raw)

If an application does not request IOMMU support, we can avoid
allocating an IOTLB cache pool.

This saves 112kB (IOTLB_CACHE_SIZE * sizeof(struct vhost_iotlb_entry))
per vq.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 lib/vhost/iotlb.c  | 20 +++++++++++---------
 lib/vhost/socket.c |  4 +++-
 lib/vhost/vhost.c  |  7 ++++++-
 lib/vhost/vhost.h  |  5 ++++-
 4 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 2a78929e78..6a729e8804 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -341,16 +341,18 @@ vhost_user_iotlb_init(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	TAILQ_INIT(&vq->iotlb_list);
 	TAILQ_INIT(&vq->iotlb_pending_list);
 
-	vq->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
-		sizeof(struct vhost_iotlb_entry), 0, socket);
-	if (!vq->iotlb_pool) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"Failed to create IOTLB cache pool for vq %"PRIu32"\n",
-			vq->index);
-		return -1;
+	if (dev->flags & VIRTIO_DEV_SUPPORT_IOMMU) {
+		vq->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
+			sizeof(struct vhost_iotlb_entry), 0, socket);
+		if (!vq->iotlb_pool) {
+			VHOST_LOG_CONFIG(dev->ifname, ERR,
+				"Failed to create IOTLB cache pool for vq %"PRIu32"\n",
+				vq->index);
+			return -1;
+		}
+		for (i = 0; i < IOTLB_CACHE_SIZE; i++)
+			vhost_user_iotlb_pool_put(vq, &vq->iotlb_pool[i]);
 	}
-	for (i = 0; i < IOTLB_CACHE_SIZE; i++)
-		vhost_user_iotlb_pool_put(vq, &vq->iotlb_pool[i]);
 
 	vq->iotlb_cache_nr = 0;
 
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index a8df2d484a..608ae577fd 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -228,7 +228,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	vhost_set_ifname(vid, vsocket->path, size);
 
 	vhost_setup_virtio_net(vid, vsocket->use_builtin_virtio_net,
-		vsocket->net_compliant_ol_flags, vsocket->stats_enabled);
+		vsocket->net_compliant_ol_flags, vsocket->stats_enabled,
+		vsocket->iommu_support);
 
 	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
 
@@ -905,6 +906,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 	vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
 	vsocket->net_compliant_ol_flags = flags & RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
 	vsocket->stats_enabled = flags & RTE_VHOST_USER_NET_STATS_ENABLE;
+	vsocket->iommu_support = flags & RTE_VHOST_USER_IOMMU_SUPPORT;
 
 	if (vsocket->async_copy &&
 		(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index aa671f47a3..9e5e69f72d 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -771,7 +771,8 @@ vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 }
 
 void
-vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
+vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled,
+	bool support_iommu)
 {
 	struct virtio_net *dev = get_device(vid);
 
@@ -790,6 +791,10 @@ vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats
 		dev->flags |= VIRTIO_DEV_STATS_ENABLED;
 	else
 		dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
+	if (support_iommu)
+		dev->flags |= VIRTIO_DEV_SUPPORT_IOMMU;
+	else
+		dev->flags &= ~VIRTIO_DEV_SUPPORT_IOMMU;
 }
 
 void
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 782d916ae0..3da6f217fd 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -37,6 +37,8 @@
 #define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)
 /*  Used to indicate the application has requested statistics collection */
 #define VIRTIO_DEV_STATS_ENABLED ((uint32_t)1 << 6)
+/*  Used to indicate the application has requested iommu support */
+#define VIRTIO_DEV_SUPPORT_IOMMU ((uint32_t)1 << 7)
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
@@ -803,7 +805,8 @@ int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags, bool stats_enabled);
+void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags, bool stats_enabled,
+	bool support_iommu);
 void vhost_enable_extbuf(int vid);
 void vhost_enable_linearbuf(int vid);
 int vhost_enable_guest_notification(struct virtio_net *dev,
-- 
2.37.3


             reply	other threads:[~2022-09-16  9:02 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-16  9:02 David Marchand [this message]
2022-09-22 13:08 ` Xia, Chenbo
2022-09-29  8:39   ` Xia, Chenbo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220916090202.1190834-1-david.marchand@redhat.com \
    --to=david.marchand@redhat.com \
    --cc=chenbo.xia@intel.com \
    --cc=dev@dpdk.org \
    --cc=maxime.coquelin@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).