From: Pravin M Bathija <pravin.bathija@dell.com>
To: <dev@dpdk.org>
Cc: <pravin.bathija@dell.com>, <pravin.m.bathija.dev@gmail.com>
Subject: [PATCH v4 3/5] vhost_user: support function defines for back-end
Date: Tue, 11 Nov 2025 11:40:00 +0000
Message-ID: <20251111114002.2902696-4-pravin.bathija@dell.com>
In-Reply-To: <20251111114002.2902696-1-pravin.bathija@dell.com>

Define support functions that are called from the vhost-user back-end
message handlers, such as set memory table, get max memory slots, add
memory region, and remove memory region. These are common helpers to
initialize device memory, unmap a set of memory regions, copy memory
regions, align memory addresses, and DMA map/unmap a single memory
region.

Signed-off-by: Pravin M Bathija <pravin.bathija@dell.com>
---
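Note, not part of the patch itself: below is a minimal, self-contained
sketch of the sparse slot-array convention the helpers in this patch
rely on. The struct and values are simplified stand-ins for
rte_vhost_mem_region and VHOST_MEMORY_MAX_NREGIONS, not the real DPDK
definitions; an unused slot is marked by host_user_addr == 0 and
iteration always walks all slots:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NREGIONS 8

/* Simplified stand-in for rte_vhost_mem_region. */
struct mem_region {
	uint64_t host_user_addr;	/* 0 marks an unused slot */
	uint64_t size;
};

int
main(void)
{
	/* Slot 1 was "removed", so the array is sparse. */
	struct mem_region regions[MAX_NREGIONS] = {
		[0] = { .host_user_addr = 0x1000, .size = 0x1000 },
		[2] = { .host_user_addr = 0x5000, .size = 0x2000 },
	};
	uint32_t i;

	/*
	 * Walk every slot up to the maximum and skip empty ones,
	 * rather than assuming a dense 0..nregions-1 range.
	 */
	for (i = 0; i < MAX_NREGIONS; i++) {
		if (regions[i].host_user_addr == 0)
			continue;
		printf("region %" PRIu32 ": addr 0x%" PRIx64 " size 0x%" PRIx64 "\n",
		       i, regions[i].host_user_addr, regions[i].size);
	}
	return 0;
}

This is why free_all_mem_regions() and hua_to_alignment() in the diff
below iterate up to VHOST_MEMORY_MAX_NREGIONS and skip empty slots
instead of trusting a dense mem->nregions count.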
lib/vhost/vhost_user.c | 150 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 140 insertions(+), 10 deletions(-)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 4bfb13fb98..7dc21fe42a 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -171,6 +171,80 @@ get_blk_size(int fd)
 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+static void
+async_dma_map_region(struct virtio_net *dev, struct rte_vhost_mem_region *reg, bool do_map)
+{
+	uint32_t i;
+	int ret = 0;
+	int found = 0;
+	struct guest_page *page;
+
+	uint64_t reg_size = reg->size;
+	uint64_t host_user_addr = reg->host_user_addr;
+
+	/* Find the first guest page backing this region. */
+	for (i = 0; i < dev->nr_guest_pages; i++) {
+		page = &dev->guest_pages[i];
+		if (host_user_addr == page->host_user_addr) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map: region not found");
+		return;
+	}
+
+	if (do_map) {
+		while (reg_size) {
+			page = &dev->guest_pages[i];
+			ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+					page->host_user_addr,
+					page->host_iova,
+					page->size);
+			if (ret) {
+				if (rte_errno == ENODEV)
+					return;
+
+				/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map failed");
+			}
+
+			reg_size -= page->size;
+			++i;
+			if (i >= dev->nr_guest_pages) {
+				/* shouldn't get here */
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map: No more pages");
+				return;
+			}
+		}
+	} else {
+		while (reg_size) {
+			page = &dev->guest_pages[i];
+			ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+					page->host_user_addr,
+					page->host_iova,
+					page->size);
+			if (ret) {
+				if (rte_errno == EINVAL)
+					return;
+
+				/* DMA unmapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine unmap failed");
+			}
+
+			reg_size -= page->size;
+			++i;
+			if (i >= dev->nr_guest_pages) {
+				/* shouldn't get here */
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine unmap: No more pages");
+				return;
+			}
+		}
+	}
+}
+
 static void
 async_dma_map(struct virtio_net *dev, bool do_map)
 {
@@ -225,7 +299,17 @@ async_dma_map(struct virtio_net *dev, bool do_map)
 }
 
 static void
-free_mem_region(struct virtio_net *dev)
+free_mem_region(struct rte_vhost_mem_region *reg)
+{
+	if (reg != NULL && reg->host_user_addr) {
+		munmap(reg->mmap_addr, reg->mmap_size);
+		close(reg->fd);
+		memset(reg, 0, sizeof(struct rte_vhost_mem_region));
+	}
+}
+
+static void
+free_all_mem_regions(struct virtio_net *dev)
 {
 	uint32_t i;
 	struct rte_vhost_mem_region *reg;
@@ -236,12 +320,10 @@ free_mem_region(struct virtio_net *dev)
 	if (dev->async_copy && rte_vfio_is_enabled("vfio"))
 		async_dma_map(dev, false);
 
-	for (i = 0; i < dev->mem->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
 		reg = &dev->mem->regions[i];
-		if (reg->host_user_addr) {
-			munmap(reg->mmap_addr, reg->mmap_size);
-			close(reg->fd);
-		}
+		if (reg->mmap_addr)
+			free_mem_region(reg);
 	}
 }
@@ -255,7 +337,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
 		vdpa_dev->ops->dev_cleanup(dev->vid);
 
 	if (dev->mem) {
-		free_mem_region(dev);
+		free_all_mem_regions(dev);
 		rte_free(dev->mem);
 		dev->mem = NULL;
 	}
@@ -704,7 +786,7 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	vhost_devices[dev->vid] = dev;
 
 	mem_size = sizeof(struct rte_vhost_memory) +
-		sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
+		sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS;
 	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
 	if (!mem) {
 		VHOST_CONFIG_LOG(dev->ifname, ERR,
@@ -808,8 +890,10 @@ hua_to_alignment(struct rte_vhost_memory *mem, void *ptr)
 	uint32_t i;
 	uintptr_t hua = (uintptr_t)ptr;
 
-	for (i = 0; i < mem->nregions; i++) {
+	for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++) {
 		r = &mem->regions[i];
+		if (r->host_user_addr == 0)
+			continue;
 		if (hua >= r->host_user_addr &&
 		    hua < r->host_user_addr + r->size) {
 			return get_blk_size(r->fd);
@@ -1248,8 +1332,12 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	 */
 	memory = &ctx->msg.payload.memory;
+	int reg_msg_index = 0;
 	for (i = 0; i < memory->nregions; i++) {
 		reg = &dev->mem->regions[i];
-		memory->regions[i].userspace_addr = reg->host_user_addr;
+		if (reg->host_user_addr == 0)
+			continue;
+		memory->regions[reg_msg_index].userspace_addr = reg->host_user_addr;
+		reg_msg_index++;
 	}
 
 	/* Send the addresses back to qemu */
@@ -1278,6 +1366,8 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	/* Now userfault register and we can use the memory */
 	for (i = 0; i < memory->nregions; i++) {
 		reg = &dev->mem->regions[i];
+		if (reg->host_user_addr == 0)
+			continue;
 		if (vhost_user_postcopy_region_register(dev, reg) < 0)
 			return -1;
 	}
@@ -1382,6 +1472,46 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	return 0;
 }
 
+static int
+vhost_user_initialize_memory(struct virtio_net **pdev)
+{
+	struct virtio_net *dev = *pdev;
+	int numa_node = SOCKET_ID_ANY;
+
+	/*
+	 * If VQ 0 has already been allocated, try to allocate on the same
+	 * NUMA node. It can be reallocated later in numa_realloc().
+	 */
+	if (dev->nr_vring > 0)
+		numa_node = dev->virtqueue[0]->numa_node;
+
+	dev->nr_guest_pages = 0;
+	if (dev->guest_pages == NULL) {
+		dev->max_guest_pages = VHOST_MEMORY_MAX_NREGIONS;
+		dev->guest_pages = rte_zmalloc_socket(NULL,
+					dev->max_guest_pages *
+					sizeof(struct guest_page),
+					RTE_CACHE_LINE_SIZE,
+					numa_node);
+		if (dev->guest_pages == NULL) {
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for dev->guest_pages");
+			return -1;
+		}
+	}
+
+	dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+		sizeof(struct rte_vhost_mem_region) * VHOST_MEMORY_MAX_NREGIONS, 0, numa_node);
+	if (dev->mem == NULL) {
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
+		rte_free(dev->guest_pages);
+		dev->guest_pages = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net **pdev,
 			struct vhu_msg_context *ctx,
--
2.43.0