From: Xuan Ding <xuan.ding@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, sunil.pai.g@intel.com,
bruce.richardson@intel.com, harry.van.haaren@intel.com,
yong.liu@intel.com, wenwux.ma@intel.com,
Xuan Ding <xuan.ding@intel.com>
Subject: [dpdk-dev] [PATCH v4 1/2] vhost: enable IOMMU for async vhost
Date: Mon, 5 Jul 2021 08:19:27 +0000
Message-ID: <20210705081928.98546-2-xuan.ding@intel.com>
In-Reply-To: <20210705081928.98546-1-xuan.ding@intel.com>
The use of an IOMMU has many advantages, such as isolation and address
translation. This patch extends the capability of the DMA engine to use
the IOMMU if the DMA device is bound to vfio.
When the memory table is set, the guest memory will be mapped
into the default container of DPDK.
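For context, a minimal sketch (not part of this patch) of how an
application opts in; register_async_vfio_socket() is a hypothetical
helper and the socket path is up to the caller:

    #include <rte_vhost.h>

    /* Register a vhost-user socket with the async data path enabled
     * and IOMMU programming requested for guest memory (the DMA
     * device is assumed to be bound to vfio-pci). */
    static int
    register_async_vfio_socket(const char *path)
    {
        uint64_t flags = RTE_VHOST_USER_ASYNC_COPY |
                         RTE_VHOST_USER_ASYNC_USE_VFIO;

        return rte_vhost_driver_register(path, flags);
    }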
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
doc/guides/prog_guide/vhost_lib.rst | 9 ++++++
lib/vhost/rte_vhost.h | 1 +
lib/vhost/socket.c | 9 ++++++
lib/vhost/vhost.h | 1 +
lib/vhost/vhost_user.c | 46 ++++++++++++++++++++++++++++-
5 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 05c42c9b11..c3beda23d9 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -118,6 +118,15 @@ The following is an overview of some key Vhost API functions:
It is disabled by default.
+ - ``RTE_VHOST_USER_ASYNC_USE_VFIO``
+
+ In the asynchronous data path, the vhost library is not aware of which
+ driver (igb_uio/vfio) the DMA device is bound to. The application should
+ pass this flag to tell the vhost library whether the IOMMU should be
+ programmed for guest memory.
+
+ It is disabled by default.
+
- ``RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS``
Since v16.04, the vhost library forwards checksum and gso requests for
diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h
index 8d875e9322..a766ea7b6b 100644
--- a/lib/vhost/rte_vhost.h
+++ b/lib/vhost/rte_vhost.h
@@ -37,6 +37,7 @@ extern "C" {
#define RTE_VHOST_USER_LINEARBUF_SUPPORT (1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY (1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS (1ULL << 8)
+#define RTE_VHOST_USER_ASYNC_USE_VFIO (1ULL << 9)
/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 5d0d728d52..77c722c86b 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -42,6 +42,7 @@ struct vhost_user_socket {
bool extbuf;
bool linearbuf;
bool async_copy;
+ bool async_use_vfio;
bool net_compliant_ol_flags;
/*
@@ -243,6 +244,13 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
dev->async_copy = 1;
}
+ if (vsocket->async_use_vfio) {
+ dev = get_device(vid);
+
+ if (dev)
+ dev->async_use_vfio = 1;
+ }
+
VHOST_LOG_CONFIG(INFO, "new device, handle is %d\n", vid);
if (vsocket->notify_ops->new_connection) {
@@ -879,6 +887,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
+ vsocket->async_use_vfio = flags & RTE_VHOST_USER_ASYNC_USE_VFIO;
vsocket->net_compliant_ol_flags = flags & RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS;
if (vsocket->async_copy &&
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 8078ddff79..fb775ce4ed 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -370,6 +370,7 @@ struct virtio_net {
int16_t broadcast_rarp;
uint32_t nr_vring;
int async_copy;
+ int async_use_vfio;
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 8f0eba6412..72459e192f 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -45,6 +45,7 @@
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
+#include <rte_vfio.h>
#include "iotlb.h"
#include "vhost.h"
@@ -141,6 +142,36 @@ get_blk_size(int fd)
return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
+static int
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
+{
+ int ret = 0;
+ uint64_t host_iova;
+ host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
+ if (do_map) {
+ /* Add mapped region into the default container of DPDK. */
+ ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ region->host_user_addr,
+ host_iova,
+ region->size);
+ if (ret) {
+ VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+ return ret;
+ }
+ } else {
+ /* Remove mapped region from the default container of DPDK. */
+ ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+ region->host_user_addr,
+ host_iova,
+ region->size);
+ if (ret) {
+ VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+ return ret;
+ }
+ }
+ return ret;
+}
+
static void
free_mem_region(struct virtio_net *dev)
{
@@ -155,6 +186,9 @@ free_mem_region(struct virtio_net *dev)
if (reg->host_user_addr) {
munmap(reg->mmap_addr, reg->mmap_size);
close(reg->fd);
+
+ if (dev->async_copy && dev->async_use_vfio)
+ async_dma_map(reg, false);
}
}
}
@@ -1105,6 +1139,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
uint64_t mmap_size;
uint64_t alignment;
int populate;
+ int ret;
/* Check for memory_size + mmap_offset overflow */
if (mmap_offset >= -region->size) {
@@ -1158,13 +1193,22 @@ vhost_user_mmap_region(struct virtio_net *dev,
region->mmap_size = mmap_size;
region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
- if (dev->async_copy)
+ if (dev->async_copy) {
if (add_guest_pages(dev, region, alignment) < 0) {
VHOST_LOG_CONFIG(ERR,
"adding guest pages to region failed.\n");
return -1;
}
+ if (dev->async_use_vfio) {
+ ret = async_dma_map(region, true);
+ if (ret) {
+ VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
+ return -1;
+ }
+ }
+ }
+
VHOST_LOG_CONFIG(INFO,
"guest memory region size: 0x%" PRIx64 "\n"
"\t guest physical addr: 0x%" PRIx64 "\n"
--
2.17.1
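For readers unfamiliar with the VFIO container API that async_dma_map()
relies on, a standalone sketch of the same call follows; dma_map_buffer()
is a hypothetical helper for illustration, whereas the patch itself
operates on guest memory regions:

    #include <stdint.h>
    #include <rte_memory.h>
    #include <rte_vfio.h>

    /* Map [buf, buf + len) into the IOMMU through DPDK's default VFIO
     * container, mirroring async_dma_map(region, true) above. Calling
     * rte_vfio_container_dma_unmap() with the same vaddr/iova/len
     * triple removes the mapping on teardown. */
    static int
    dma_map_buffer(void *buf, uint64_t len)
    {
        uint64_t vaddr = (uint64_t)(uintptr_t)buf;
        uint64_t iova = rte_mem_virt2iova(buf);

        return rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
                                          vaddr, iova, len);
    }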