From: Pravin M Bathija <pravin.bathija@dell.com>
To: <dev@dpdk.org>
Cc: <pravin.bathija@dell.com>, <pravin.m.bathija.dev@gmail.com>
Subject: [PATCH v4 4/5] vhost_user: Function defs for add/rem mem regions
Date: Tue, 11 Nov 2025 11:45:20 +0000
Message-ID: <20251111114521.2902867-5-pravin.bathija@dell.com>
In-Reply-To: <20251111114521.2902867-1-pravin.bathija@dell.com>

Define the message handlers for VHOST_USER_ADD_MEM_REG and
VHOST_USER_REM_MEM_REG, which are invoked when the vhost-user front-end
(e.g. QEMU) asks the back-end to add or remove a single memory region.
In addition to the QEMU front-end, these changes have been tested with a
libblkio front-end and an SPDK/DPDK back-end, doing I/O through a
libblkio-based device driver to SPDK-backed drives.

vhost_user_set_mem_table() is reworked to use the common support
functions, and a handler for VHOST_USER_GET_MAX_MEM_SLOTS is defined so
that the front-end can query how many memory slots the back-end
supports. A support function that invalidates and re-translates the
vrings is also defined and is used by both the add and remove memory
region handlers.
Signed-off-by: Pravin M Bathija <pravin.bathija@dell.com>
---
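Reviewer note: below is a minimal sketch of the single-region payload
these handlers consume. The region field names match the accesses in
the code below; the wrapper struct name and the padding field follow
the vhost-user specification and the header changes earlier in this
series, so treat the names and layout as illustrative rather than the
authoritative definition:

	/* illustrative sketch, not the canonical header definition */
	struct VhostUserMemoryRegion {
		uint64_t guest_phys_addr; /* guest physical address of the region */
		uint64_t memory_size;     /* region length in bytes */
		uint64_t userspace_addr;  /* front-end (e.g. QEMU) virtual address */
		uint64_t mmap_offset;     /* offset into the fd sent with the message */
	};

	struct VhostUserSingleMemReg {
		uint64_t padding;
		struct VhostUserMemoryRegion region;
	};

VHOST_USER_ADD_MEM_REG carries one such region plus exactly one file
descriptor to mmap; VHOST_USER_REM_MEM_REG identifies the region to
drop by its guest physical address, user address and size, ignoring
the mmap offset.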
lib/vhost/vhost_user.c | 249 +++++++++++++++++++++++++++++++++++------
1 file changed, 217 insertions(+), 32 deletions(-)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 7dc21fe42a..d9cf967ba2 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -71,6 +71,9 @@ VHOST_MESSAGE_HANDLER(VHOST_USER_SET_FEATURES, vhost_user_set_features, false, t
VHOST_MESSAGE_HANDLER(VHOST_USER_SET_OWNER, vhost_user_set_owner, false, true) \
VHOST_MESSAGE_HANDLER(VHOST_USER_RESET_OWNER, vhost_user_reset_owner, false, false) \
VHOST_MESSAGE_HANDLER(VHOST_USER_SET_MEM_TABLE, vhost_user_set_mem_table, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_GET_MAX_MEM_SLOTS, vhost_user_get_max_mem_slots, false, false) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_ADD_MEM_REG, vhost_user_add_mem_reg, true, true) \
+VHOST_MESSAGE_HANDLER(VHOST_USER_REM_MEM_REG, vhost_user_rem_mem_reg, false, true) \
VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_BASE, vhost_user_set_log_base, true, true) \
VHOST_MESSAGE_HANDLER(VHOST_USER_SET_LOG_FD, vhost_user_set_log_fd, true, true) \
VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_NUM, vhost_user_set_vring_num, false, true) \
@@ -1520,7 +1523,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
struct virtio_net *dev = *pdev;
struct VhostUserMemory *memory = &ctx->msg.payload.memory;
struct rte_vhost_mem_region *reg;
- int numa_node = SOCKET_ID_ANY;
uint64_t mmap_offset;
uint32_t i;
bool async_notify = false;
@@ -1565,39 +1567,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_flush_all(dev);
- free_mem_region(dev);
+ free_all_mem_regions(dev);
rte_free(dev->mem);
dev->mem = NULL;
}
- /*
- * If VQ 0 has already been allocated, try to allocate on the same
- * NUMA node. It can be reallocated later in numa_realloc().
- */
- if (dev->nr_vring > 0)
- numa_node = dev->virtqueue[0]->numa_node;
-
- dev->nr_guest_pages = 0;
- if (dev->guest_pages == NULL) {
- dev->max_guest_pages = 8;
- dev->guest_pages = rte_zmalloc_socket(NULL,
- dev->max_guest_pages *
- sizeof(struct guest_page),
- RTE_CACHE_LINE_SIZE,
- numa_node);
- if (dev->guest_pages == NULL) {
- VHOST_CONFIG_LOG(dev->ifname, ERR,
- "failed to allocate memory for dev->guest_pages");
- goto close_msg_fds;
- }
- }
-
- dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
- sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
- if (dev->mem == NULL) {
- VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
- goto free_guest_pages;
- }
+ if (vhost_user_initialize_memory(pdev) < 0)
+ goto close_msg_fds;
for (i = 0; i < memory->nregions; i++) {
reg = &dev->mem->regions[i];
@@ -1661,11 +1637,182 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
return RTE_VHOST_MSG_RESULT_OK;
free_mem_table:
- free_mem_region(dev);
+ free_all_mem_regions(dev);
rte_free(dev->mem);
dev->mem = NULL;
+ rte_free(dev->guest_pages);
+ dev->guest_pages = NULL;
+close_msg_fds:
+ close_msg_fds(ctx);
+ return RTE_VHOST_MSG_RESULT_ERR;
+}
+
-free_guest_pages:
+static int
+vhost_user_get_max_mem_slots(struct virtio_net **pdev __rte_unused,
+ struct vhu_msg_context *ctx,
+ int main_fd __rte_unused)
+{
+ uint32_t max_mem_slots = VHOST_MEMORY_MAX_NREGIONS;
+
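+	/* report how many memory regions this back-end can accept */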
+ ctx->msg.payload.u64 = (uint64_t)max_mem_slots;
+ ctx->msg.size = sizeof(ctx->msg.payload.u64);
+ ctx->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static void
+dev_invalidate_vrings(struct virtio_net *dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (!vq)
+ continue;
+
+ if (vq->desc || vq->avail || vq->used) {
+ /* vhost_user_lock_all_queue_pairs locked all qps */
+ VHOST_USER_ASSERT_LOCK(dev, vq, VHOST_USER_ADD_MEM_REG);
+
+ /*
+ * If the memory table got updated, the ring addresses
+ * need to be translated again as virtual addresses have
+ * changed.
+ */
+ vring_invalidate(dev, vq);
+
+ translate_ring_addresses(&dev, &vq);
+ }
+ }
+}
+
+static int
+vhost_user_add_mem_reg(struct virtio_net **pdev,
+ struct vhu_msg_context *ctx,
+		       int main_fd)
+{
+ uint32_t i;
+ struct virtio_net *dev = *pdev;
+ struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
+
+ /* convert first region add to normal memory table set */
+ if (dev->mem == NULL) {
+ if (vhost_user_initialize_memory(pdev) < 0)
+ goto close_msg_fds;
+ }
+
+ /* make sure new region will fit */
+ if (dev->mem->nregions >= VHOST_MEMORY_MAX_NREGIONS) {
+ VHOST_CONFIG_LOG(dev->ifname, ERR, "too many memory regions already (%u)",
+ dev->mem->nregions);
+ goto close_msg_fds;
+ }
+
+	/* make sure the supplied memory fd is present */
+	if (ctx->fd_num != 1) {
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "unexpected fd count (%u), expected 1", ctx->fd_num);
+ goto close_msg_fds;
+ }
+
+ /* Make sure no overlap in guest virtual address space */
+ if (dev->mem != NULL && dev->mem->nregions > 0) {
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+ struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
+
+ if (current_region->mmap_size == 0)
+ continue;
+
+ uint64_t current_region_guest_start = current_region->guest_user_addr;
+ uint64_t current_region_guest_end = current_region_guest_start
+ + current_region->mmap_size - 1;
+ uint64_t proposed_region_guest_start = region->userspace_addr;
+ uint64_t proposed_region_guest_end = proposed_region_guest_start
+ + region->memory_size - 1;
+ bool overlap = false;
+
+			bool current_region_guest_start_overlap =
+ current_region_guest_start >= proposed_region_guest_start &&
+ current_region_guest_start <= proposed_region_guest_end;
+			bool current_region_guest_end_overlap =
+ current_region_guest_end >= proposed_region_guest_start &&
+ current_region_guest_end <= proposed_region_guest_end;
+ bool proposed_region_guest_start_overlap =
+ proposed_region_guest_start >= current_region_guest_start &&
+ proposed_region_guest_start <= current_region_guest_end;
+ bool proposed_region_guest_end_overlap =
+ proposed_region_guest_end >= current_region_guest_start &&
+ proposed_region_guest_end <= current_region_guest_end;
+
+			overlap = current_region_guest_start_overlap
+				|| current_region_guest_end_overlap
+ || proposed_region_guest_start_overlap
+ || proposed_region_guest_end_overlap;
+
+ if (overlap) {
+ VHOST_CONFIG_LOG(dev->ifname, ERR,
+ "requested memory region overlaps with another region");
+ VHOST_CONFIG_LOG(dev->ifname, ERR,
+ "\tRequested region address:0x%" PRIx64,
+ region->userspace_addr);
+ VHOST_CONFIG_LOG(dev->ifname, ERR,
+ "\tRequested region size:0x%" PRIx64,
+ region->memory_size);
+ VHOST_CONFIG_LOG(dev->ifname, ERR,
+ "\tOverlapping region address:0x%" PRIx64,
+ current_region->guest_user_addr);
+ VHOST_CONFIG_LOG(dev->ifname, ERR,
+ "\tOverlapping region size:0x%" PRIx64,
+ current_region->mmap_size);
+ goto close_msg_fds;
+ }
+ }
+ }
+
+ /* find a new region and set it like memory table set does */
+ struct rte_vhost_mem_region *reg = NULL;
+
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+ if (dev->mem->regions[i].guest_user_addr == 0) {
+ reg = &dev->mem->regions[i];
+ break;
+ }
+ }
+ if (reg == NULL) {
+ VHOST_CONFIG_LOG(dev->ifname, ERR, "no free memory region");
+ goto close_msg_fds;
+ }
+
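+	/* populate the free slot from the message payload; the region takes ownership of the fd */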
+ reg->guest_phys_addr = region->guest_phys_addr;
+ reg->guest_user_addr = region->userspace_addr;
+ reg->size = region->memory_size;
+ reg->fd = ctx->fds[0];
+
+ if (vhost_user_mmap_region(dev, reg, region->mmap_offset) < 0) {
+ VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region");
+ goto close_msg_fds;
+ }
+
+ dev->mem->nregions++;
+
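+	/* keep the async DMA (VFIO) mappings in sync with the newly added region */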
+ if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+ async_dma_map_region(dev, reg, true);
+
+ if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0)
+ goto free_mem_table;
+
+ dev_invalidate_vrings(dev);
+ dump_guest_pages(dev);
+
+ return RTE_VHOST_MSG_RESULT_OK;
+
+free_mem_table:
+ free_all_mem_regions(dev);
+ rte_free(dev->mem);
+ dev->mem = NULL;
rte_free(dev->guest_pages);
dev->guest_pages = NULL;
close_msg_fds:
@@ -1673,6 +1820,44 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
return RTE_VHOST_MSG_RESULT_ERR;
}
+static int
+vhost_user_rem_mem_reg(struct virtio_net **pdev,
+		       struct vhu_msg_context *ctx,
+ int main_fd __rte_unused)
+{
+ uint32_t i;
+ struct virtio_net *dev = *pdev;
+ struct VhostUserMemoryRegion *region = &ctx->msg.payload.memory_single.region;
+
+ if (dev->mem != NULL && dev->mem->nregions > 0) {
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
+ struct rte_vhost_mem_region *current_region = &dev->mem->regions[i];
+
+ if (current_region->guest_user_addr == 0)
+ continue;
+
+ /*
+ * According to the vhost-user specification:
+ * The memory region to be removed is identified by its guest address,
+ * user address and size. The mmap offset is ignored.
+ */
+ if (region->userspace_addr == current_region->guest_user_addr
+ && region->guest_phys_addr == current_region->guest_phys_addr
+ && region->memory_size == current_region->size) {
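+				/* drop the async DMA (VFIO) mapping before tearing the region down */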
+ if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+ async_dma_map_region(dev, current_region, false);
+ dev_invalidate_vrings(dev);
+ free_mem_region(current_region);
+ dev->mem->nregions--;
+ return RTE_VHOST_MSG_RESULT_OK;
+ }
+ }
+ }
+
+ VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to find region");
+ return RTE_VHOST_MSG_RESULT_ERR;
+}
+
static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
--
2.43.0