From: Adrian Moreno <amorenoz@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, zhihong.wang@intel.com,
maxime.coquelin@redhat.com, Adrian Moreno <amorenoz@redhat.com>,
stable@dpdk.org
Subject: [dpdk-dev] [PATCH v6 2/3] vhost: convert buffer addresses to GPA for logging
Date: Wed, 9 Oct 2019 13:54:31 +0200
Message-ID: <20191009115432.14863-3-amorenoz@redhat.com>
In-Reply-To: <20191009115432.14863-1-amorenoz@redhat.com>
Add IOVA versions of the dirty page logging functions.

Note that the user-facing API, rte_vhost_log_write(), is not modified, so
make it explicit in its documentation that it expects the address in guest
physical address (GPA) space.
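As an illustration of what this implies for external backends: the address
passed to rte_vhost_log_write() must already be a GPA, not a host virtual
address or an IOVA. Below is a minimal sketch of how a backend holding a
host virtual address might derive the GPA from the memory table before
logging. backend_hva_to_gpa() and backend_log_write() are hypothetical
helpers for illustration only; they are not part of this patch or of the
vhost library. Only rte_vhost_get_mem_table() and rte_vhost_log_write()
are existing public API.

#include <stdlib.h>
#include <rte_vhost.h>

/* Illustrative helper: walk the guest memory map to turn a host VA
 * (as seen by the backend process) into a guest physical address.
 */
static uint64_t
backend_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva, uint64_t len)
{
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
		    hva + len <= reg->host_user_addr + reg->size)
			return reg->guest_phys_addr +
				(hva - reg->host_user_addr);
	}

	return 0;
}

/* Illustrative helper: log a write given a host VA and length. */
static void
backend_log_write(int vid, uint64_t hva, uint64_t len)
{
	struct rte_vhost_memory *mem = NULL;
	uint64_t gpa;

	if (rte_vhost_get_mem_table(vid, &mem) != 0)
		return;

	gpa = backend_hva_to_gpa(mem, hva, len);
	if (gpa)
		rte_vhost_log_write(vid, gpa, len); /* expects a GPA */

	free(mem);
}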
Fixes: 69c90e98f483 ("vhost: enable IOMMU support")
Cc: maxime.coquelin@redhat.com
Cc: stable@dpdk.org
Signed-off-by: Adrian Moreno <amorenoz@redhat.com>
--
v4 Changes: Add vhost_log_write_iova (in addition to the _cache_ version)
and use it in vdpa (non-batched) dirty page logging
---
lib/librte_vhost/rte_vhost.h | 2 +-
lib/librte_vhost/vdpa.c | 3 ++-
lib/librte_vhost/vhost.c | 40 +++++++++++++++++++++++++++++++++++
lib/librte_vhost/vhost.h | 31 +++++++++++++++++++++++++++
lib/librte_vhost/virtio_net.c | 12 ++++++-----
5 files changed, 81 insertions(+), 7 deletions(-)
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 19474bca0..fa813b05a 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -274,7 +274,7 @@ rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
* @param vid
* vhost device ID
* @param addr
- * the starting address for write
+ * the starting address for write (in guest physical address space)
* @param len
* the length to write
*/
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
index 8e45ce9f8..2b8670873 100644
--- a/lib/librte_vhost/vdpa.c
+++ b/lib/librte_vhost/vdpa.c
@@ -201,7 +201,8 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
goto fail;
desc = desc_ring[desc_id];
if (desc.flags & VRING_DESC_F_WRITE)
- vhost_log_write(dev, desc.addr, desc.len);
+ vhost_log_write_iova(dev, vq, desc.addr,
+ desc.len);
desc_id = desc.next;
} while (desc.flags & VRING_DESC_F_NEXT);
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 76e753475..6fb3dc257 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -115,6 +115,26 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
+void
+__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_write(dev, gpa, len);
+}
+
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
@@ -200,6 +220,26 @@ __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}
+void
+__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t hva, gpa, map_len;
+ map_len = len;
+
+ hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
+ if (map_len != len) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+ iova);
+ return;
+ }
+
+ gpa = hva_to_gpa(dev, hva, len);
+ if (gpa)
+ __vhost_log_cache_write(dev, vq, gpa, len);
+}
+
void *
vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t desc_addr, uint64_t desc_len)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5e9e7f09d..a2140c036 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -353,9 +353,14 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
void __vhost_log_cache_write(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -393,6 +398,32 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_cache_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_write(dev, iova, len);
+}
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d..22961cafe 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -178,7 +178,8 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
for (i = 0; i < count; i++) {
rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
- vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
+ vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
+ elem[i].len);
PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
}
@@ -633,7 +634,7 @@ copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
PRINT_PACKET(dev, (uintptr_t)dst,
(uint32_t)len, 0);
- vhost_log_cache_write(dev, vq,
+ vhost_log_cache_write_iova(dev, vq,
iova, len);
remain -= len;
@@ -733,7 +734,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
} else {
PRINT_PACKET(dev, (uintptr_t)hdr_addr,
dev->vhost_hlen, 0);
- vhost_log_cache_write(dev, vq,
+ vhost_log_cache_write_iova(dev, vq,
buf_vec[0].buf_iova,
dev->vhost_hlen);
}
@@ -748,8 +749,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
- cpy_len);
+ vhost_log_cache_write_iova(dev, vq,
+ buf_iova + buf_offset,
+ cpy_len);
PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
cpy_len, 0);
} else {
--
2.21.0