* [dpdk-dev] [PATCH v6 1/3] vhost: translate incoming log address to gpa
From: Adrian Moreno @ 2019-10-09 11:54 UTC
To: dev; +Cc: tiwei.bie, zhihong.wang, maxime.coquelin, Adrian Moreno, stable
When the IOMMU is enabled, the incoming log address is in IOVA space. In that
case, look it up in the IOTLB table and translate the resulting HVA to a GPA.
If the IOMMU is not enabled, the incoming log address is already a GPA, so no
translation is needed.
Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: maxime.coquelin@redhat.com
Cc: stable@dpdk.org
Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
---
v6 changes: use PRIx64 macro to fix UB1604-32 build
v5 changes: Rebase on top of dpdk-next-virtio
---
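Note on the v6 change: on 32-bit targets uint64_t is typically
'unsigned long long' rather than 'unsigned long', so a plain %lx
conversion triggers format warnings there. PRIx64 from <inttypes.h>
expands to the right specifier on both ABIs, e.g.:

    RTE_LOG(ERR, VHOST_CONFIG, "log_addr: 0x%" PRIx64 "\n", log_addr);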
lib/librte_vhost/vhost.c | 1 +
lib/librte_vhost/vhost.h | 20 +++++++++++++++++
lib/librte_vhost/vhost_user.c | 42 ++++++++++++++++++++++++++++++++++-
3 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index cea44df8c..76e753475 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -382,6 +382,7 @@ vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
+ vq->log_guest_addr = 0;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_wr_unlock(vq);
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5131a97a3..5e9e7f09d 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -447,6 +447,26 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
return 0;
}
+static __rte_always_inline uint64_t
+hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
+{
+ struct rte_vhost_mem_region *r;
+ uint32_t i;
+
+ if (unlikely(!dev || !dev->mem))
+ return 0;
+
+ for (i = 0; i < dev->mem->nregions; i++) {
+ r = &dev->mem->regions[i];
+
+ if (vva >= r->host_user_addr &&
+ vva + len < r->host_user_addr + r->size) {
+ return r->guest_phys_addr + vva - r->host_user_addr;
+ }
+ }
+ return 0;
+}
+
static __rte_always_inline struct virtio_net *
get_device(int vid)
{
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index ce4e9fb32..5ae561e55 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -575,6 +575,39 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
return qva_to_vva(dev, ra, size);
}
+/*
+ * Converts vring log address to GPA.
+ * If IOMMU is enabled, the log address is IOVA.
+ * If IOMMU is not enabled, the log address is already GPA.
+ */
+static uint64_t
+translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t log_addr)
+{
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+ const uint64_t exp_size = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
+ uint64_t hva, gpa;
+ uint64_t size = exp_size;
+
+ hva = vhost_iova_to_vva(dev, vq, log_addr,
+ &size, VHOST_ACCESS_RW);
+ if (size != exp_size)
+ return 0;
+
+ gpa = hva_to_gpa(dev, hva, exp_size);
+ if (!gpa) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "VQ: Failed to find GPA for log_addr: 0x%" PRIx64 " hva: 0x%" PRIx64 "\n",
+ log_addr, hva);
+ return 0;
+ }
+ return gpa;
+
+ } else
+ return log_addr;
+}
+
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
@@ -682,7 +715,14 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
vq->last_avail_idx = vq->used->idx;
}
- vq->log_guest_addr = addr->log_guest_addr;
+ vq->log_guest_addr =
+ translate_log_addr(dev, vq, addr->log_guest_addr);
+ if (vq->log_guest_addr == 0) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to map log_guest_addr .\n",
+ dev->vid);
+ return dev;
+ }
vq->access_ok = 1;
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
--
2.21.0
* [dpdk-dev] [PATCH v6 2/3] vhost: convert buffer addresses to GPA for logging
From: Adrian Moreno @ 2019-10-09 11:54 UTC
To: dev; +Cc: tiwei.bie, zhihong.wang, maxime.coquelin, Adrian Moreno, stable
Add IOVA versions of the dirty page logging functions.
Note that the user-facing API, rte_vhost_log_write, is not modified, so
make it explicit that it expects the address in GPA space.
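With a vIOMMU, the addresses held by the datapath (e.g. buf_vec[i].buf_iova)
are IOVAs, so logging them unmodified would mark the wrong guest pages dirty.
The new helpers translate first; a condensed sketch of __vhost_log_write_iova()
from the diff below (error logging trimmed):

    uint64_t map_len = len;

    /* IOVA -> HVA through the IOTLB */
    hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
    if (map_len != len)     /* range not fully covered by one IOTLB entry */
            return;

    /* HVA -> GPA, then log as before */
    gpa = hva_to_gpa(dev, hva, len);
    if (gpa)
            __vhost_log_write(dev, gpa, len);

The inline wrappers in vhost.h take this path only when
VIRTIO_F_IOMMU_PLATFORM has been negotiated; otherwise the passed address
is already a GPA and is logged directly.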
Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: maxime.coquelin@redhat.com
Cc: stable@dpdk.org
Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
---
v4 changes: Add vhost_log_write_iova (in addition to the _cache_ version)
and use it in vdpa (non-batched) dirty page logging
---
lib/librte_vhost/rte_vhost.h | 2 +-
lib/librte_vhost/vdpa.c | 3 ++-
lib/librte_vhost/vhost.c | 40 +++++++++++++++++++++++++++++++++++
lib/librte_vhost/vhost.h | 31 +++++++++++++++++++++++++++
lib/librte_vhost/virtio_net.c | 12 ++++++-----
5 files changed, 81 insertions(+), 7 deletions(-)
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 19474bca0..fa813b05a 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -274,7 +274,7 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
* @param vid
* vhost device ID
* @param addr
- * the starting address for write
+ * the starting address for write (in guest physical address space)
* @param len
* the length to write
*/
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
index 8e45ce9f8..2b8670873 100644
--- a/lib/librte_vhost/vdpa.c
+++ b/lib/librte_vhost/vdpa.c
@@ -201,7 +201,8 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
goto fail;
desc = desc_ring[desc_id];
if (desc.flags & VRING_DESC_F_WRITE)
- vhost_log_write(dev, desc.addr, desc.len);
+ vhost_log_write_iova(dev, vq, desc.addr,
+ desc.len);
desc_id = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 76e753475..6fb3dc257 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -115,6 +115,26 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
+void
+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_write(dev, gpa, len);
+}
+
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
@@ -200,6 +220,26 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_cache_write(dev, vq, gpa, len);
+}
+
void *
vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t desc_addr, uint64_t desc_len)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5e9e7f09d..a2140c036 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -353,9 +353,14 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
void __vhost_log_cache_write(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -393,6 +398,32 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_cache_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_write(dev, iova, len);
+}
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d..22961cafe 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -178,7 +178,8 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
for (i = 0; i < count; i++) {
rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
- vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
+ vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
+ elem[i].len);
PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
}
@@ -633,7 +634,7 @@ copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
PRINT_PACKET(dev, (uintptr_t)dst,
(uint32_t)len, 0);
- vhost_log_cache_write(dev, vq,
+ vhost_log_cache_write_iova(dev, vq,
iova, len);
remain -= len;
@@ -733,7 +734,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
} else {
PRINT_PACKET(dev, (uintptr_t)hdr_addr,
dev->vhost_hlen, 0);
- vhost_log_cache_write(dev, vq,
+ vhost_log_cache_write_iova(dev, vq,
buf_vec[0].buf_iova,
dev->vhost_hlen);
}
@@ -748,8 +749,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
- cpy_len);
+ vhost_log_cache_write_iova(dev, vq,
+ buf_iova + buf_offset,
+ cpy_len);
PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
cpy_len, 0);
} else {
--
2.21.0