DPDK patches and discussions
* [PATCH] vhost: exclude VM hugepages from coredumps
@ 2022-12-06  4:46 Mike Pattrick
From: Mike Pattrick @ 2022-12-06  4:46 UTC
  To: Maxime Coquelin, Chenbo Xia; +Cc: dev, Mike Pattrick

Currently, if an application that uses the vhost library chooses to include
shared hugepages in its coredumps, the resulting coredump is larger than
expected because it also contains the virtual machine's memory, which is not
needed for debugging the application itself.

This patch marks all vhost hugepages as DONTDUMP, except for the select pages
that DPDK itself accesses, such as the virtqueue rings and the guest pages
cached in the IOTLB.

Signed-off-by: Mike Pattrick <mkp@redhat.com>
---
 lib/vhost/iotlb.c      |  5 +++++
 lib/vhost/vhost.h      | 11 +++++++++++
 lib/vhost/vhost_user.c | 10 ++++++++++
 3 files changed, 26 insertions(+)
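
For reference, the mechanism behind mem_set_dump() is madvise() with
MADV_DONTDUMP and MADV_DODUMP. The standalone sketch below is illustrative
only and is not part of the patch: set_dump_hint() is a hypothetical name,
and the code assumes a Linux kernel that provides these advice flags (the
helper becomes a no-op otherwise, as in the patch).

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Hypothetical helper mirroring the idea of mem_set_dump(): include or
 * exclude a memory range from coredumps. madvise() requires a
 * page-aligned address; mappings returned by mmap() satisfy that.
 */
static int
set_dump_hint(void *ptr, size_t size, bool enable)
{
#ifdef MADV_DONTDUMP
	if (madvise(ptr, size, enable ? MADV_DODUMP : MADV_DONTDUMP) == -1) {
		fprintf(stderr, "could not set coredump preference (%s)\n",
			strerror(errno));
		return -1;
	}
	return 0;
#else
	(void)ptr;
	(void)size;
	(void)enable;
	return 0; /* Advice flags not available; leave kernel defaults alone. */
#endif
}

int
main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 4 * page;

	/* Anonymous mapping standing in for a shared guest memory region. */
	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return 1;

	set_dump_hint(mem, len, false); /* exclude the whole region */
	set_dump_hint(mem, page, true); /* re-include one page the process uses */

	munmap(mem, len);
	return 0;
}

Ranges excluded this way carry the "dd" flag on the VmFlags line of
/proc/<pid>/smaps, which is a convenient way to check the effect at runtime.
The patch itself reports madvise() failures through the vhost config log
rather than stderr.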

diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 6a729e8804..2f89f88817 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -149,6 +149,7 @@ vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
 	rte_rwlock_write_lock(&vq->iotlb_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
+		mem_set_dump((void *)node->uaddr, node->size, true);
 		TAILQ_REMOVE(&vq->iotlb_list, node, next);
 		vhost_user_iotlb_pool_put(vq, node);
 	}
@@ -170,6 +171,7 @@ vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
 
 	RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
 		if (!entry_idx) {
+			mem_set_dump((void *)node->uaddr, node->size, true);
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
 			vq->iotlb_cache_nr--;
@@ -222,12 +224,14 @@ vhost_user_iotlb_cache_insert(struct virtio_net *dev, struct vhost_virtqueue *vq
 			vhost_user_iotlb_pool_put(vq, new_node);
 			goto unlock;
 		} else if (node->iova > new_node->iova) {
+			mem_set_dump((void *)new_node->uaddr, new_node->size, true);
 			TAILQ_INSERT_BEFORE(node, new_node, next);
 			vq->iotlb_cache_nr++;
 			goto unlock;
 		}
 	}
 
+	mem_set_dump((void *)new_node->uaddr, new_node->size, true);
 	TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
 	vq->iotlb_cache_nr++;
 
@@ -255,6 +259,7 @@ vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
 			break;
 
 		if (iova < node->iova + node->size) {
+			mem_set_dump((void *)node->uaddr, node->size, true);
 			TAILQ_REMOVE(&vq->iotlb_list, node, next);
 			vhost_user_iotlb_pool_put(vq, node);
 			vq->iotlb_cache_nr--;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index ef211ed519..09e1d5d97b 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -987,4 +987,15 @@ mbuf_is_consumed(struct rte_mbuf *m)
 
 	return true;
 }
+
+static __rte_always_inline void
+mem_set_dump(void *ptr, size_t size, bool enable)
+{
+#ifdef MADV_DONTDUMP
+	if (madvise(ptr, size, enable ? MADV_DODUMP : MADV_DONTDUMP) == -1) {
+		rte_log(RTE_LOG_INFO, vhost_config_log_level,
+			"VHOST_CONFIG: could not set coredump preference (%s).\n", strerror(errno));
+	}
+#endif
+}
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 9902ae9944..8f33d5f4d9 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -793,6 +793,9 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			return;
 		}
 
+		mem_set_dump(vq->desc_packed, len, true);
+		mem_set_dump(vq->driver_event, len, true);
+		mem_set_dump(vq->device_event, len, true);
 		vq->access_ok = true;
 		return;
 	}
@@ -846,6 +849,9 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			"some packets maybe resent for Tx and dropped for Rx\n");
 	}
 
+	mem_set_dump(vq->desc, len, true);
+	mem_set_dump(vq->avail, len, true);
+	mem_set_dump(vq->used, len, true);
 	vq->access_ok = true;
 
 	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address desc: %p\n", vq->desc);
@@ -1224,6 +1230,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	region->mmap_addr = mmap_addr;
 	region->mmap_size = mmap_size;
 	region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
+	mem_set_dump(mmap_addr, mmap_size, false);
 
 	if (dev->async_copy) {
 		if (add_guest_pages(dev, region, alignment) < 0) {
@@ -1528,6 +1535,7 @@ inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *f
 		return NULL;
 	}
 
+	mem_set_dump(ptr, size, false);
 	*fd = mfd;
 	return ptr;
 }
@@ -1736,6 +1744,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev,
 		dev->inflight_info->fd = -1;
 	}
 
+	mem_set_dump(addr, mmap_size, false);
 	dev->inflight_info->fd = fd;
 	dev->inflight_info->addr = addr;
 	dev->inflight_info->size = mmap_size;
@@ -2283,6 +2292,7 @@ vhost_user_set_log_base(struct virtio_net **pdev,
 	dev->log_addr = (uint64_t)(uintptr_t)addr;
 	dev->log_base = dev->log_addr + off;
 	dev->log_size = size;
+	mem_set_dump(addr, size, false);
 
 	for (i = 0; i < dev->nr_vring; i++) {
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
-- 
2.31.1

