DPDK patches and discussions
 help / color / mirror / Atom feed
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
	mkp@redhat.com, fbl@redhat.com, jasowang@redhat.com,
	cunming.liang@intel.com, xieyongji@bytedance.com,
	echaudro@redhat.com, eperezma@redhat.com, amorenoz@redhat.com,
	lulu@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v4 02/26] vhost: add helper of IOTLB entries coredump
Date: Thu,  1 Jun 2023 22:07:48 +0200	[thread overview]
Message-ID: <20230601200812.672233-3-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20230601200812.672233-1-maxime.coquelin@redhat.com>

This patch reworks the IOTLB code to extract the madvise-related
bits into a dedicated helper. This refactoring improves code
sharing.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 lib/vhost/iotlb.c | 77 +++++++++++++++++++++++++----------------------
 1 file changed, 41 insertions(+), 36 deletions(-)

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 870c8acb88..51d45de446 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -23,6 +23,34 @@ struct vhost_iotlb_entry {
 
 #define IOTLB_CACHE_SIZE 2048
 
+static void
+vhost_user_iotlb_set_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node)
+{
+	uint64_t align;
+
+	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
+
+	mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, true, align);
+}
+
+static void
+vhost_user_iotlb_clear_dump(struct virtio_net *dev, struct vhost_iotlb_entry *node,
+		struct vhost_iotlb_entry *prev, struct vhost_iotlb_entry *next)
+{
+	uint64_t align, mask;
+
+	align = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
+	mask = ~(align - 1);
+
+	/* Don't disable coredump if the previous node is in the same page */
+	if (prev == NULL || (node->uaddr & mask) != ((prev->uaddr + prev->size - 1) & mask)) {
+		/* Don't disable coredump if the next node is in the same page */
+		if (next == NULL ||
+				((node->uaddr + node->size - 1) & mask) != (next->uaddr & mask))
+			mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, false, align);
+	}
+}
+
 static struct vhost_iotlb_entry *
 vhost_user_iotlb_pool_get(struct vhost_virtqueue *vq)
 {
@@ -149,8 +177,8 @@ vhost_user_iotlb_cache_remove_all(struct virtio_net *dev, struct vhost_virtqueue
 	rte_rwlock_write_lock(&vq->iotlb_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
-		mem_set_dump((void *)(uintptr_t)node->uaddr, node->size, false,
-			hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr));
+		vhost_user_iotlb_clear_dump(dev, node, NULL, NULL);
+
 		TAILQ_REMOVE(&vq->iotlb_list, node, next);
 		vhost_user_iotlb_pool_put(vq, node);
 	}
@@ -164,7 +192,6 @@ static void
 vhost_user_iotlb_cache_random_evict(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;
-	uint64_t alignment, mask;
 	int entry_idx;
 
 	rte_rwlock_write_lock(&vq->iotlb_lock);
@@ -173,20 +200,10 @@ vhost_user_iotlb_cache_random_evict(struct virtio_net *dev, struct vhost_virtque
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
 		if (!entry_idx) {
-			struct vhost_iotlb_entry *next_node;
-			alignment = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
-			mask = ~(alignment - 1);
-
-			/* Don't disable coredump if the previous node is in the same page */
-			if (prev_node == NULL || (node->uaddr & mask) !=
-					((prev_node->uaddr + prev_node->size - 1) & mask)) {
-				next_node = RTE_TAILQ_NEXT(node, next);
-				/* Don't disable coredump if the next node is in the same page */
-				if (next_node == NULL || ((node->uaddr + node->size - 1) & mask) !=
-						(next_node->uaddr & mask))
-					mem_set_dump((void *)(uintptr_t)node->uaddr, node->size,
-							false, alignment);
-			}
+			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
+
+			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
+
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
 			vq->iotlb_cache_nr--;
@@ -240,16 +257,16 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
 			vhost_user_iotlb_pool_put(vq, new_node);
 			goto unlock;
 		} else if (node->iova > new_node->iova) {
-			mem_set_dump((void *)(uintptr_t)new_node->uaddr, new_node->size, true,
-				hua_to_alignment(dev->mem, (void *)(uintptr_t)new_node->uaddr));
+			vhost_user_iotlb_set_dump(dev, new_node);
+
 			TAILQ_INSERT_BEFORE(node, new_node, next);
 			vq->iotlb_cache_nr++;
 			goto unlock;
 		}
 	}
 
-	mem_set_dump((void *)(uintptr_t)new_node->uaddr, new_node->size, true,
-		hua_to_alignment(dev->mem, (void *)(uintptr_t)new_node->uaddr));
+	vhost_user_iotlb_set_dump(dev, new_node);
+
 	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
 	vq->iotlb_cache_nr++;
 
@@ -265,7 +282,6 @@ vhost_user_iotlb_cache_remove(struct virtio_net *dev, struct vhost_virtqueue *vq
 					uint64_t iova, uint64_t size)
 {
 	struct vhost_iotlb_entry *node, *temp_node, *prev_node = NULL;
-	uint64_t alignment, mask;
 
 	if (unlikely(!size))
 		return;
@@ -278,20 +294,9 @@ vhost_user_iotlb_cache_remove(struct virtio_net *dev, struct vhost_virtqueue *vq
 			break;
 
 		if (iova < node->iova + node->size) {
-			struct vhost_iotlb_entry *next_node;
-			alignment = hua_to_alignment(dev->mem, (void *)(uintptr_t)node->uaddr);
-			mask = ~(alignment-1);
-
-			/* Don't disable coredump if the previous node is in the same page */
-			if (prev_node == NULL || (node->uaddr & mask) !=
-					((prev_node->uaddr + prev_node->size - 1) & mask)) {
-				next_node = RTE_TAILQ_NEXT(node, next);
-				/* Don't disable coredump if the next node is in the same page */
-				if (next_node == NULL || ((node->uaddr + node->size - 1) & mask) !=
-						(next_node->uaddr & mask))
-					mem_set_dump((void *)(uintptr_t)node->uaddr, node->size,
-							false, alignment);
-			}
+			struct vhost_iotlb_entry *next_node = RTE_TAILQ_NEXT(node, next);
+
+			vhost_user_iotlb_clear_dump(dev, node, prev_node, next_node);
 
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
-- 
2.40.1


  parent reply	other threads:[~2023-06-01 20:08 UTC|newest]

Thread overview: 30+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-06-01 20:07 [PATCH v4 00/26] Add VDUSE support to Vhost library Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 01/26] vhost: fix IOTLB entries overlap check with previous entry Maxime Coquelin
2023-06-01 20:07 ` Maxime Coquelin [this message]
2023-06-01 20:07 ` [PATCH v4 03/26] vhost: add helper for IOTLB entries shared page check Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 04/26] vhost: don't dump unneeded pages with IOTLB Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 05/26] vhost: change to single IOTLB cache per device Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 06/26] vhost: add offset field to IOTLB entries Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 07/26] vhost: add page size info to IOTLB entry Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 08/26] vhost: retry translating IOVA after IOTLB miss Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 09/26] vhost: introduce backend ops Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 10/26] vhost: add IOTLB cache entry removal callback Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 11/26] vhost: add helper for IOTLB misses Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 12/26] vhost: add helper for interrupt injection Maxime Coquelin
2023-06-01 20:07 ` [PATCH v4 13/26] vhost: add API to set max queue pairs Maxime Coquelin
2023-06-05  7:56   ` Xia, Chenbo
2023-06-01 20:08 ` [PATCH v4 14/26] net/vhost: use " Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 15/26] vhost: add control virtqueue support Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 16/26] vhost: add VDUSE device creation and destruction Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 17/26] vhost: add VDUSE callback for IOTLB miss Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 18/26] vhost: add VDUSE callback for IOTLB entry removal Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 19/26] vhost: add VDUSE callback for IRQ injection Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 20/26] vhost: add VDUSE events handler Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 21/26] vhost: add support for virtqueue state get event Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 22/26] vhost: add support for VDUSE status set event Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 23/26] vhost: add support for VDUSE IOTLB update event Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 24/26] vhost: add VDUSE device startup Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 25/26] vhost: add multiqueue support to VDUSE Maxime Coquelin
2023-06-01 20:08 ` [PATCH v4 26/26] vhost: add VDUSE device stop Maxime Coquelin
2023-06-05  7:56   ` Xia, Chenbo
2023-06-06  8:14     ` Maxime Coquelin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230601200812.672233-3-maxime.coquelin@redhat.com \
    --to=maxime.coquelin@redhat.com \
    --cc=amorenoz@redhat.com \
    --cc=chenbo.xia@intel.com \
    --cc=cunming.liang@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=echaudro@redhat.com \
    --cc=eperezma@redhat.com \
    --cc=fbl@redhat.com \
    --cc=jasowang@redhat.com \
    --cc=lulu@redhat.com \
    --cc=mkp@redhat.com \
    --cc=xieyongji@bytedance.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).