DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
	mkp@redhat.com, fbl@redhat.com, jasowang@redhat.com,
	cunming.liang@intel.com, xieyongji@bytedance.com,
	echaudro@redhat.com, eperezma@redhat.com, amorenoz@redhat.com,
	lulu@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v3 09/28] vhost: add page size info to IOTLB entry
Date: Thu, 25 May 2023 18:25:32 +0200	[thread overview]
Message-ID: <20230525162551.70359-10-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20230525162551.70359-1-maxime.coquelin@redhat.com>

VDUSE will close the file descriptor once the shared memory
has been mapped, so it will no longer be possible to retrieve
the page size afterwards.

This patch adds a new page_shift field to the IOTLB entry,
so that the information is passed at IOTLB cache insertion
time. It is stored as a bit shift value so that the IOTLB
entry still fits in a single cacheline.
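
As a minimal standalone sketch of this encoding (power-of-two
page sizes are assumed, RTE_BIT64() is open-coded as a plain
shift, and the example is illustrative, not part of the patch):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t page_size = 4096;	/* e.g. 4 KiB host pages */

		/* Store only the shift: 12 fits in a uint8_t, whereas
		 * the full page size would need a uint64_t in the entry. */
		uint8_t page_shift = __builtin_ctzll(page_size);

		/* Recover the size where needed, as RTE_BIT64(page_shift)
		 * does in the patch below. */
		uint64_t decoded = UINT64_C(1) << page_shift;

		printf("shift=%" PRIu8 " size=%" PRIu64 "\n",
			page_shift, decoded);
		return 0;
	}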

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/iotlb.c      | 46 ++++++++++++++++++++----------------------
 lib/vhost/iotlb.h      |  2 +-
 lib/vhost/vhost.h      |  1 -
 lib/vhost/vhost_user.c |  8 +++++---
 4 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 14d143366b..a23008909f 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -19,14 +19,14 @@ struct vhost_iotlb_entry {
 	uint64_t uaddr;
 	uint64_t uoffset;
 	uint64_t size;
+	uint8_t page_shift;
 	uint8_t perm;
 };
 
 #define IOTLB_CACHE_SIZE 2048
 
 static bool
-vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b,
-		uint64_t align)
+vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entry *b)
 {
 	uint64_t a_start, a_end, b_start;
 
@@ -38,44 +38,41 @@ vhost_user_iotlb_share_page(struct vhost_iotlb_entry *a, struct vhost_iotlb_entr
 
 	/* Assumes entry a lower than entry b */
 	RTE_ASSERT(a_start < b_start);
-	a_end = RTE_ALIGN_CEIL(a_start + a->size, align);
-	b_start = RTE_ALIGN_FLOOR(b_start, align);
+	a_end = RTE_ALIGN_CEIL(a_start + a->size, RTE_BIT64(a->page_shift));
+	b_start = RTE_ALIGN_FLOOR(b_start, RTE_BIT64(b->page_shift));
 
 	return a_end > b_start;
 }
 
 static void
-vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node)
+vhost_user_iotlb_set_dump(struct vhost_iotlb_entry *node)
 {
-	uint64_t align, start;
+	uint64_t start;
 
 	start = node->uaddr + node->uoffset;
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
-
-	mem_set_dump((void *)(uintptr_t)start, node->size, true, align);
+	mem_set_dump((void *)(uintptr_t)start, node->size, true, RTE_BIT64(node->page_shift));
 }
 
 static void
-vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node,
+vhost_user_iotlb_clear_dump(struct vhost_iotlb_entry *node,
 		struct vhost_iotlb_entry *prev, struct vhost_iotlb_entry *next)
 {
-	uint64_t align, start, end;
+	uint64_t start, end;
 
 	start = node->uaddr + node->uoffset;
 	end = start + node->size;
 
-	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)start);
-
 	/* Skip first page if shared with previous entry. */
-	if (vhost_user_iotlb_share_page(prev, node, align))
-		start = RTE_ALIGN_CEIL(start, align);
+	if (vhost_user_iotlb_share_page(prev, node))
+		start = RTE_ALIGN_CEIL(start, RTE_BIT64(node->page_shift));
 
 	/* Skip last page if shared with next entry. */
-	if (vhost_user_iotlb_share_page(node, next, align))
-		end = RTE_ALIGN_FLOOR(end, align);
+	if (vhost_user_iotlb_share_page(node, next))
+		end = RTE_ALIGN_FLOOR(end, RTE_BIT64(node->page_shift));
 
 	if (end > start)
-		mem_set_dump((void *)(uintptr_t)start, end - start, false, align);
+		mem_set_dump((void *)(uintptr_t)start, end - start, false,
+			RTE_BIT64(node->page_shift));
 }
 
 static struct vhost_iotlb_entry *
@@ -198,7 +195,7 @@ vhost_user_iotlb_cache_remove_all(struct virtio_net *dev)
 	vhost_user_iotlb_wr_lock_all(dev);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &dev->iotlb_list, next, temp_node) {
-		vhost_user_iotlb_set_dump(dev, node);
+		vhost_user_iotlb_set_dump(node);
 
 		TAILQ_REMOVE(&dev->iotlb_list, node, next);
 		vhost_user_iotlb_pool_put(dev, node);
@@ -223,7 +220,7 @@ vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 		if (!entry_idx) {
 			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
 
-			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
+			vhost_user_iotlb_clear_dump(node, prev_node, next_node);
 
 			TAILQ_REMOVE(&dev->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(dev, node);
@@ -239,7 +236,7 @@ vhost_user_iotlb_cache_random_evict(struct virtio_net *dev)
 
 void
 vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-				uint64_t uoffset, uint64_t size, uint8_t perm)
+				uint64_t uoffset, uint64_t size, uint64_t page_size, uint8_t perm)
 {
 	struct vhost_iotlb_entry *node, *new_node;
 
@@ -263,6 +260,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 	new_node->uaddr = uaddr;
 	new_node->uoffset = uoffset;
 	new_node->size = size;
+	new_node->page_shift = __builtin_ctzll(page_size);
 	new_node->perm = perm;
 
 	vhost_user_iotlb_wr_lock_all(dev);
@@ -276,7 +274,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 			vhost_user_iotlb_pool_put(dev, new_node);
 			goto unlock;
 		} else if (node->iova > new_node->iova) {
-			vhost_user_iotlb_set_dump(dev, new_node);
+			vhost_user_iotlb_set_dump(new_node);
 
 			TAILQ_INSERT_BEFORE(node, new_node, next);
 			dev->iotlb_cache_nr++;
@@ -284,7 +282,7 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 		}
 	}
 
-	vhost_user_iotlb_set_dump(dev, new_node);
+	vhost_user_iotlb_set_dump(new_node);
 
 	TAILQ_INSERT_TAIL(&dev->iotlb_list, new_node, next);
 	dev->iotlb_cache_nr++;
@@ -313,7 +311,7 @@ vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t si
 		if (iova < node->iova + node->size) {
 			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
 
-			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
+			vhost_user_iotlb_clear_dump(node, prev_node, next_node);
 
 			TAILQ_REMOVE(&dev->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(dev, node);
diff --git a/lib/vhost/iotlb.h b/lib/vhost/iotlb.h
index bee36c5903..81ca04df21 100644
--- a/lib/vhost/iotlb.h
+++ b/lib/vhost/iotlb.h
@@ -58,7 +58,7 @@ vhost_user_iotlb_wr_unlock_all(struct virtio_net *dev)
 }
 
 void vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t uaddr,
-					uint64_t uoffset, uint64_t size, uint8_t perm);
+		uint64_t uoffset, uint64_t size, uint64_t page_size, uint8_t perm);
 void vhost_user_iotlb_cache_remove(struct virtio_net *dev, uint64_t iova, uint64_t size);
 uint64_t vhost_user_iotlb_cache_find(struct virtio_net *dev, uint64_t iova,
 					uint64_t *size, uint8_t perm);
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 67cc4a2fdb..4ace5ab081 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -1016,6 +1016,5 @@ mbuf_is_consumed(struct rte_mbuf *m)
 	return true;
 }
 
-uint64_t hua_to_alignment(struct rte_vhost_memory *mem, void *ptr);
 void mem_set_dump(void *ptr, size_t size, bool enable, uint64_t alignment);
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 222ccbf819..11b265c1ba 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -743,7 +743,7 @@ log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	return log_gpa;
 }
 
-uint64_t
+static uint64_t
 hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
 {
 	struct rte_vhost_mem_region *r;
@@ -2632,7 +2632,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
 	struct virtio_net *dev = *pdev;
 	struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb;
 	uint16_t i;
-	uint64_t vva, len;
+	uint64_t vva, len, pg_sz;
 
 	switch (imsg->type) {
 	case VHOST_IOTLB_UPDATE:
@@ -2641,7 +2641,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
 		if (!vva)
 			return RTE_VHOST_MSG_RESULT_ERR;
 
-		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, imsg->perm);
+		pg_sz = hua_to_alignment(dev->mem, (void *)(uintptr_t)vva);
+
+		vhost_user_iotlb_cache_insert(dev, imsg->iova, vva, 0, len, pg_sz, imsg->perm);
 
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
-- 
2.40.1
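
For reference, below is a standalone sketch of the shared-page
check with the per-entry page shifts introduced above. The
a_start/b_start setup is inferred from the surrounding hunk
context, and RTE_ALIGN_CEIL()/RTE_ALIGN_FLOOR()/RTE_BIT64() are
open-coded with their plain-C equivalents (power-of-two page
sizes assumed):

	#include <stdbool.h>
	#include <stdint.h>

	struct entry {
		uint64_t uaddr;
		uint64_t uoffset;
		uint64_t size;
		uint8_t page_shift;
	};

	/* True if the last page backing 'a' overlaps the first page
	 * backing 'b', i.e. the two entries share a host page. */
	static bool
	share_page(const struct entry *a, const struct entry *b)
	{
		uint64_t a_pgsz = UINT64_C(1) << a->page_shift;
		uint64_t b_pgsz = UINT64_C(1) << b->page_shift;
		uint64_t a_end, b_start;

		/* Round a's end up and b's start down to page boundaries. */
		a_end = (a->uaddr + a->uoffset + a->size + a_pgsz - 1)
			& ~(a_pgsz - 1);
		b_start = (b->uaddr + b->uoffset) & ~(b_pgsz - 1);

		return a_end > b_start;
	}

	int main(void)
	{
		/* Two 4 KiB-backed entries meeting in the same page:
		 * a covers [0x1000, 0x1800), b covers [0x1800, 0x2000). */
		struct entry a = { .uaddr = 0x1000, .size = 0x800, .page_shift = 12 };
		struct entry b = { .uaddr = 0x1800, .size = 0x800, .page_shift = 12 };

		return share_page(&a, &b) ? 0 : 1;	/* shares -> exits 0 */
	}

With per-entry shifts, the dump helpers no longer need to look
up the alignment via hua_to_alignment(), which this patch makes
static to vhost_user.c.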


Thread overview: 50+ messages
2023-05-25 16:25 [PATCH v3 00/28] Add VDUSE support to Vhost library Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 01/28] vhost: fix missing guest notif stat increment Maxime Coquelin
2023-06-01 19:59   ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 02/28] vhost: fix invalid call FD handling Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 03/28] vhost: fix IOTLB entries overlap check with previous entry Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 04/28] vhost: add helper of IOTLB entries coredump Maxime Coquelin
2023-05-26  8:46   ` David Marchand
2023-06-01 13:43     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 05/28] vhost: add helper for IOTLB entries shared page check Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 06/28] vhost: don't dump unneeded pages with IOTLB Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 07/28] vhost: change to single IOTLB cache per device Maxime Coquelin
2023-05-29  6:32   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 08/28] vhost: add offset field to IOTLB entries Maxime Coquelin
2023-05-25 16:25 ` Maxime Coquelin [this message]
2023-05-29  6:32   ` [PATCH v3 09/28] vhost: add page size info to IOTLB entry Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 10/28] vhost: retry translating IOVA after IOTLB miss Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 11/28] vhost: introduce backend ops Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 12/28] vhost: add IOTLB cache entry removal callback Maxime Coquelin
2023-05-29  6:33   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 13/28] vhost: add helper for IOTLB misses Maxime Coquelin
2023-05-29  6:33   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 14/28] vhost: add helper for interrupt injection Maxime Coquelin
2023-05-26  8:54   ` David Marchand
2023-06-01 13:58     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 15/28] vhost: add API to set max queue pairs Maxime Coquelin
2023-05-26  8:58   ` David Marchand
2023-06-01 14:00     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 16/28] net/vhost: use " Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 17/28] vhost: add control virtqueue support Maxime Coquelin
2023-05-29  6:51   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 18/28] vhost: add VDUSE device creation and destruction Maxime Coquelin
2023-05-26  9:11   ` David Marchand
2023-06-01 14:05     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 19/28] vhost: add VDUSE callback for IOTLB miss Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 20/28] vhost: add VDUSE callback for IOTLB entry removal Maxime Coquelin
2023-05-29  6:51   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 21/28] vhost: add VDUSE callback for IRQ injection Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 22/28] vhost: add VDUSE events handler Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 23/28] vhost: add support for virtqueue state get event Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 24/28] vhost: add support for VDUSE status set event Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 25/28] vhost: add support for VDUSE IOTLB update event Maxime Coquelin
2023-05-29  6:52   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 26/28] vhost: add VDUSE device startup Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 27/28] vhost: add multiqueue support to VDUSE Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 28/28] vhost: add VDUSE device stop Maxime Coquelin
2023-05-29  6:53   ` Xia, Chenbo
2023-06-01 18:48     ` Maxime Coquelin
2023-05-26  9:14 ` [PATCH v3 00/28] Add VDUSE support to Vhost library David Marchand
2023-06-01 14:59   ` Maxime Coquelin
2023-06-01 15:18     ` David Marchand
