* [dpdk-dev] [PATCH] vhost: remove async dma map status
From: Xuan Ding @ 2021-10-27 9:55 UTC (permalink / raw)
To: dev, maxime.coquelin, chenbo.xia; +Cc: jiayu.hu, Xuan Ding, anatoly.burakov
The async DMA map status flag was added to prevent unnecessary unmaps
when DMA devices are bound to a kernel driver. However, it adds
maintenance cost across a lot of code. This patch removes the DMA map
status flag and uses rte_errno instead.
This patch relies on the following patch, which fixes the
partial-unmap check in the VFIO unmapping API.
[1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html
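For readers skimming the diff, below is a minimal, self-contained sketch of
the rte_errno-based handling the unmap path switches to. The standalone
helper and its parameters are purely illustrative; the real logic lives in
async_dma_map() in vhost_user.c.

#include <errno.h>
#include <rte_errno.h>
#include <rte_vfio.h>

/* Illustrative helper (not part of the patch): unmap unconditionally and
 * let rte_errno tell us whether there was actually nothing to unmap,
 * e.g. because the DMA device is bound to a kernel driver. */
static int
sketch_dma_unmap(uint64_t host_user_addr, uint64_t host_iova, uint64_t size)
{
	int ret;

	ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
					   host_user_addr, host_iova, size);
	if (ret && rte_errno == EINVAL)
		return 0; /* nothing was mapped; not an error */

	return ret;
}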
Cc: anatoly.burakov@intel.com
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
lib/vhost/vhost.h | 3 --
lib/vhost/vhost_user.c | 70 ++++++++----------------------------------
2 files changed, 13 insertions(+), 60 deletions(-)
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 6c6a2da2c9..71fddf3592 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -370,9 +370,6 @@ struct virtio_net {
uint32_t nr_vring;
int async_copy;
- /* Record the dma map status for each region. */
- bool *async_map_status;
-
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 720d1c1c9d..9489d03e45 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -144,7 +144,7 @@ get_blk_size(int fd)
}
static int
-async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool do_map)
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
{
uint64_t host_iova;
int ret = 0;
@@ -156,8 +156,6 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d
region->host_user_addr,
host_iova,
region->size);
- *dma_map_success = ret == 0;
-
if (ret) {
/*
* DMA device may bind with kernel driver, in this case,
@@ -175,26 +173,24 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d
return 0;
VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
- return ret;
-
+ /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+ return 0;
}
} else {
- /* No need to do vfio unmap if the map failed. */
- if (!*dma_map_success)
- return 0;
-
/* Remove mapped region from the default container of DPDK. */
ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
region->host_user_addr,
host_iova,
region->size);
if (ret) {
+ /* Like DMA map, ignore the kernel driver case when unmapping. */
+ if (rte_errno == EINVAL)
+ return 0;
+
VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
return ret;
}
- /* Clear the flag once the unmap succeeds. */
- *dma_map_success = 0;
}
return ret;
@@ -213,7 +209,7 @@ free_mem_region(struct virtio_net *dev)
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
if (dev->async_copy && rte_vfio_is_enabled("vfio"))
- async_dma_map(reg, &dev->async_map_status[i], false);
+ async_dma_map(reg, false);
munmap(reg->mmap_addr, reg->mmap_size);
close(reg->fd);
@@ -228,11 +224,6 @@ vhost_backend_cleanup(struct virtio_net *dev)
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
-
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
}
rte_free(dev->guest_pages);
@@ -688,19 +679,6 @@ numa_realloc(struct virtio_net *dev, int index)
}
dev->mem = mem;
- if (dev->async_copy && rte_vfio_is_enabled("vfio")) {
- if (dev->async_map_status == NULL) {
- dev->async_map_status = rte_zmalloc_socket("async-dma-map-status",
- sizeof(bool) * dev->mem->nregions, 0, node);
- if (!dev->async_map_status) {
- VHOST_LOG_CONFIG(ERR,
- "(%d) failed to realloc dma mapping status on node\n",
- dev->vid);
- return dev;
- }
- }
- }
-
gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
RTE_CACHE_LINE_SIZE, node);
if (!gp) {
@@ -1231,7 +1209,6 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
static int
vhost_user_mmap_region(struct virtio_net *dev,
struct rte_vhost_mem_region *region,
- uint32_t region_index,
uint64_t mmap_offset)
{
void *mmap_addr;
@@ -1294,16 +1271,14 @@ vhost_user_mmap_region(struct virtio_net *dev,
if (dev->async_copy) {
if (add_guest_pages(dev, region, alignment) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "adding guest pages to region failed.\n");
+ VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
return -1;
}
if (rte_vfio_is_enabled("vfio")) {
- ret = async_dma_map(region, &dev->async_map_status[region_index], true);
+ ret = async_dma_map(region, true);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA "
- "engine failed\n");
+ VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
return -1;
}
}
@@ -1381,11 +1356,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
-
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
}
/* Flush IOTLB cache as previous HVAs are now invalid */
@@ -1426,17 +1396,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
goto free_guest_pages;
}
- if (dev->async_copy) {
- dev->async_map_status = rte_zmalloc_socket("async-dma-map-status",
- sizeof(bool) * memory->nregions, 0, numa_node);
- if (!dev->async_map_status) {
- VHOST_LOG_CONFIG(ERR,
- "(%d) failed to allocate memory for dma mapping status\n",
- dev->vid);
- goto free_mem_table;
- }
- }
-
for (i = 0; i < memory->nregions; i++) {
reg = &dev->mem->regions[i];
@@ -1453,7 +1412,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
mmap_offset = memory->regions[i].mmap_offset;
- if (vhost_user_mmap_region(dev, reg, i, mmap_offset) < 0) {
+ if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
goto free_mem_table;
}
@@ -1501,10 +1460,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
+
free_guest_pages:
rte_free(dev->guest_pages);
dev->guest_pages = NULL;
--
2.17.1
* [dpdk-dev] [PATCH v2] vhost: remove async dma map status
From: Xuan Ding @ 2021-10-27 10:00 UTC (permalink / raw)
To: dev, maxime.coquelin, chenbo.xia; +Cc: jiayu.hu, Xuan Ding, anatoly.burakov
The async DMA map status flag was added to prevent unnecessary unmaps
when DMA devices are bound to a kernel driver. However, it adds
maintenance cost across a lot of code. This patch removes the DMA map
status flag and uses rte_errno instead.
This patch relies on the following patch, which fixes the
partial-unmap check in the VFIO unmapping API.
[1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html
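As a companion to the diff, here is a minimal sketch of the map-side
tolerance this change preserves. The helper is illustrative only, and the
specific rte_errno values treated as the kernel-driver case (ENODEV and
ENOTSUP) are an assumption drawn from the existing map path, not something
introduced by this patch.

#include <errno.h>
#include <rte_errno.h>
#include <rte_vfio.h>

/* Illustrative helper (not part of the patch): attempt the IOMMU mapping
 * and treat "DMA device bound to a kernel driver" as non-fatal, so
 * VHOST_USER_SET_MEM_TABLE handling can continue. */
static int
sketch_dma_map(uint64_t host_user_addr, uint64_t host_iova, uint64_t size)
{
	int ret;

	ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
					 host_user_addr, host_iova, size);
	if (ret && (rte_errno == ENODEV || rte_errno == ENOTSUP))
		return 0; /* kernel-driver case: nothing to map */

	return ret;
}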
Cc: anatoly.burakov@intel.com
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
v2:
* Fix a typo in commit log.
---
lib/vhost/vhost.h | 3 --
lib/vhost/vhost_user.c | 70 ++++++++----------------------------------
2 files changed, 13 insertions(+), 60 deletions(-)
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 6c6a2da2c9..71fddf3592 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -370,9 +370,6 @@ struct virtio_net {
uint32_t nr_vring;
int async_copy;
- /* Record the dma map status for each region. */
- bool *async_map_status;
-
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 720d1c1c9d..9489d03e45 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -144,7 +144,7 @@ get_blk_size(int fd)
}
static int
-async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool do_map)
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
{
uint64_t host_iova;
int ret = 0;
@@ -156,8 +156,6 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d
region->host_user_addr,
host_iova,
region->size);
- *dma_map_success = ret == 0;
-
if (ret) {
/*
* DMA device may bind with kernel driver, in this case,
@@ -175,26 +173,24 @@ async_dma_map(struct rte_vhost_mem_region *region, bool *dma_map_success, bool d
return 0;
VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
- return ret;
-
+ /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+ return 0;
}
} else {
- /* No need to do vfio unmap if the map failed. */
- if (!*dma_map_success)
- return 0;
-
/* Remove mapped region from the default container of DPDK. */
ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
region->host_user_addr,
host_iova,
region->size);
if (ret) {
+ /* Like DMA map, ignore the kernel driver case when unmapping. */
+ if (rte_errno == EINVAL)
+ return 0;
+
VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
return ret;
}
- /* Clear the flag once the unmap succeeds. */
- *dma_map_success = 0;
}
return ret;
@@ -213,7 +209,7 @@ free_mem_region(struct virtio_net *dev)
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
if (dev->async_copy && rte_vfio_is_enabled("vfio"))
- async_dma_map(reg, &dev->async_map_status[i], false);
+ async_dma_map(reg, false);
munmap(reg->mmap_addr, reg->mmap_size);
close(reg->fd);
@@ -228,11 +224,6 @@ vhost_backend_cleanup(struct virtio_net *dev)
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
-
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
}
rte_free(dev->guest_pages);
@@ -688,19 +679,6 @@ numa_realloc(struct virtio_net *dev, int index)
}
dev->mem = mem;
- if (dev->async_copy && rte_vfio_is_enabled("vfio")) {
- if (dev->async_map_status == NULL) {
- dev->async_map_status = rte_zmalloc_socket("async-dma-map-status",
- sizeof(bool) * dev->mem->nregions, 0, node);
- if (!dev->async_map_status) {
- VHOST_LOG_CONFIG(ERR,
- "(%d) failed to realloc dma mapping status on node\n",
- dev->vid);
- return dev;
- }
- }
- }
-
gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
RTE_CACHE_LINE_SIZE, node);
if (!gp) {
@@ -1231,7 +1209,6 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
static int
vhost_user_mmap_region(struct virtio_net *dev,
struct rte_vhost_mem_region *region,
- uint32_t region_index,
uint64_t mmap_offset)
{
void *mmap_addr;
@@ -1294,16 +1271,14 @@ vhost_user_mmap_region(struct virtio_net *dev,
if (dev->async_copy) {
if (add_guest_pages(dev, region, alignment) < 0) {
- VHOST_LOG_CONFIG(ERR,
- "adding guest pages to region failed.\n");
+ VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
return -1;
}
if (rte_vfio_is_enabled("vfio")) {
- ret = async_dma_map(region, &dev->async_map_status[region_index], true);
+ ret = async_dma_map(region, true);
if (ret) {
- VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA "
- "engine failed\n");
+ VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
return -1;
}
}
@@ -1381,11 +1356,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
-
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
}
/* Flush IOTLB cache as previous HVAs are now invalid */
@@ -1426,17 +1396,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
goto free_guest_pages;
}
- if (dev->async_copy) {
- dev->async_map_status = rte_zmalloc_socket("async-dma-map-status",
- sizeof(bool) * memory->nregions, 0, numa_node);
- if (!dev->async_map_status) {
- VHOST_LOG_CONFIG(ERR,
- "(%d) failed to allocate memory for dma mapping status\n",
- dev->vid);
- goto free_mem_table;
- }
- }
-
for (i = 0; i < memory->nregions; i++) {
reg = &dev->mem->regions[i];
@@ -1453,7 +1412,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
mmap_offset = memory->regions[i].mmap_offset;
- if (vhost_user_mmap_region(dev, reg, i, mmap_offset) < 0) {
+ if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
goto free_mem_table;
}
@@ -1501,10 +1460,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
- if (dev->async_map_status) {
- rte_free(dev->async_map_status);
- dev->async_map_status = NULL;
- }
+
free_guest_pages:
rte_free(dev->guest_pages);
dev->guest_pages = NULL;
--
2.17.1
* Re: [dpdk-dev] [PATCH v2] vhost: remove async dma map status
From: Maxime Coquelin @ 2021-10-29 9:36 UTC (permalink / raw)
To: Xuan Ding, dev, chenbo.xia; +Cc: jiayu.hu, anatoly.burakov
On 10/27/21 12:00, Xuan Ding wrote:
> The async DMA map status flag was added to prevent unnecessary unmaps
> when DMA devices are bound to a kernel driver. However, it adds
> maintenance cost across a lot of code. This patch removes the DMA map
> status flag and uses rte_errno instead.
>
> This patch relies on the following patch, which fixes the
> partial-unmap check in the VFIO unmapping API.
> [1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html
>
> Cc: anatoly.burakov@intel.com
>
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> v2:
> * Fix a typo in commit log.
> ---
> lib/vhost/vhost.h | 3 --
> lib/vhost/vhost_user.c | 70 ++++++++----------------------------------
> 2 files changed, 13 insertions(+), 60 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
* Re: [dpdk-dev] [PATCH v2] vhost: remove async dma map status
From: Maxime Coquelin @ 2021-10-29 10:35 UTC (permalink / raw)
To: Xuan Ding, dev, chenbo.xia; +Cc: jiayu.hu, anatoly.burakov
On 10/27/21 12:00, Xuan Ding wrote:
> The async DMA map status flag was added to prevent unnecessary unmaps
> when DMA devices are bound to a kernel driver. However, it adds
> maintenance cost across a lot of code. This patch removes the DMA map
> status flag and uses rte_errno instead.
>
> This patch relies on the following patch, which fixes the
> partial-unmap check in the VFIO unmapping API.
> [1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html
>
> Cc: anatoly.burakov@intel.com
>
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> v2:
> * Fix a typo in commit log.
> ---
> lib/vhost/vhost.h | 3 --
> lib/vhost/vhost_user.c | 70 ++++++++----------------------------------
> 2 files changed, 13 insertions(+), 60 deletions(-)
>
Applied to dpdk-next-virtio/main with the title fixed.
Please run the check-git-log script next time.
Thanks,
Maxime
* Re: [dpdk-dev] [PATCH v2] vhost: remove async dma map status
From: Ding, Xuan @ 2021-10-30 13:46 UTC (permalink / raw)
To: Maxime Coquelin, dev, Xia, Chenbo; +Cc: Hu, Jiayu, Burakov, Anatoly
Hi Maxime,
>-----Original Message-----
>From: Maxime Coquelin <maxime.coquelin@redhat.com>
>Sent: Friday, October 29, 2021 6:36 PM
>To: Ding, Xuan <xuan.ding@intel.com>; dev@dpdk.org; Xia, Chenbo
><chenbo.xia@intel.com>
>Cc: Hu, Jiayu <jiayu.hu@intel.com>; Burakov, Anatoly
><anatoly.burakov@intel.com>
>Subject: Re: [PATCH v2] vhost: remove async dma map status
>
>
>
>On 10/27/21 12:00, Xuan Ding wrote:
>> The async DMA map status flag was added to prevent unnecessary unmaps
>> when DMA devices are bound to a kernel driver. However, it adds
>> maintenance cost across a lot of code. This patch removes the DMA map
>> status flag and uses rte_errno instead.
>>
>> This patch relies on the following patch, which fixes the
>> partial-unmap check in the VFIO unmapping API.
>> [1] https://www.mail-archive.com/dev@dpdk.org/msg226464.html
>>
>> Cc: anatoly.burakov@intel.com
>>
>> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
>> ---
>> v2:
>> * Fix a typo in commit log.
>> ---
>> lib/vhost/vhost.h | 3 --
>> lib/vhost/vhost_user.c | 70 ++++++++----------------------------------
>> 2 files changed, 13 insertions(+), 60 deletions(-)
>>
>
>
>Applied to dpdk-next-virtio/main with the title fixed.
>Please run the check-git-log script next time.
Thanks for your fix. I did run the script beforehand but got no warning...
I will check the format more carefully next time.
By the way, should the Cc: line be removed from the commit log?
Regards,
Xuan
>
>Thanks,
>Maxime