* [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost
@ 2021-06-02 4:28 Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
` (8 more replies)
0 siblings, 9 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-02 4:28 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain in-flight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers.
Cheng Jiang (1):
vhost: add unsafe API to drain pkts in async vhost
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
examples/vhost/main.c | 48 +++++++++++++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 3 ++
lib/vhost/vhost_user.c | 9 ++++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
6 files changed, 148 insertions(+), 25 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-06-02 4:28 ` Cheng Jiang
2021-06-07 13:46 ` Maxime Coquelin
2021-06-02 4:28 ` [dpdk-dev] [PATCH 2/2] vhost: handle memory hotplug for " Cheng Jiang
` (7 subsequent siblings)
8 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-06-02 4:28 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path. And enable it in
vhost example.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 48 +++++++++++++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
5 files changed, 139 insertions(+), 25 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..70bb67c7f8 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,15 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+ n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
+ vdev->pkts_inflight);
+
+ free_pkts(m_cpl, n_pkt);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1501,35 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..041f40cf04 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,26 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and empties all packets
+ * for a specific vhost device queue. Packets which are inflight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..f480f188af 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..793510974a 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2119,14 +2101,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ n_pkts_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2165,12 +2147,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts = count;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ while (count)
+ count -= vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH 2/2] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
@ 2021-06-02 4:28 ` Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
` (6 subsequent siblings)
8 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-02 4:28 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 8f0eba6412..6800e60c2d 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1223,6 +1223,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the backend application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in async vhost
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
@ 2021-06-07 13:46 ` Maxime Coquelin
2021-06-08 5:26 ` Jiang, Cheng1
0 siblings, 1 reply; 70+ messages in thread
From: Maxime Coquelin @ 2021-06-07 13:46 UTC (permalink / raw)
To: Cheng Jiang, maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 6/2/21 6:28 AM, Cheng Jiang wrote:
> Applications need to stop DMA transfers and finish all the in-flight
> pkts when in VM memory hot-plug case and async vhost is used. This
> patch is to provide an unsafe API to drain in-flight pkts which are
> submitted to DMA engine in vhost async data path. And enable it in
> vhost example.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> examples/vhost/main.c | 48 +++++++++++++++++++-
> examples/vhost/main.h | 1 +
> lib/vhost/rte_vhost_async.h | 22 +++++++++
> lib/vhost/version.map | 3 ++
> lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> 5 files changed, 139 insertions(+), 25 deletions(-)
Please split example and lib changes in dedicated patches.
>
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index d2179eadb9..70bb67c7f8 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
>
> complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
> VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
> - if (complete_count)
> + if (complete_count) {
> free_pkts(p_cpl, complete_count);
> + __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
> + }
> +
> }
>
> static __rte_always_inline void
> @@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
> complete_async_pkts(vdev);
> ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
> + __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
>
> if (cpu_cpl_nr)
> free_pkts(m_cpu_cpl, cpu_cpl_nr);
> @@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
> enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> VIRTIO_RXQ, pkts, rx_count,
> m_cpu_cpl, &cpu_cpl_nr);
> + __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
> + __ATOMIC_SEQ_CST);
> +
> if (cpu_cpl_nr)
> free_pkts(m_cpu_cpl, cpu_cpl_nr);
>
> @@ -1397,8 +1404,15 @@ destroy_device(int vid)
> "(%d) device has been removed from data core\n",
> vdev->vid);
>
> - if (async_vhost_driver)
> + if (async_vhost_driver) {
> + uint16_t n_pkt = 0;
> + struct rte_mbuf *m_cpl[vdev->pkts_inflight];
> + n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
> + vdev->pkts_inflight);
> +
> + free_pkts(m_cpl, n_pkt);
> rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
> + }
>
> rte_free(vdev);
> }
> @@ -1487,6 +1501,35 @@ new_device(int vid)
> return 0;
> }
>
> +static int
> +vring_state_changed(int vid, uint16_t queue_id, int enable)
> +{
> + struct vhost_dev *vdev = NULL;
> +
> + TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
> + if (vdev->vid == vid)
> + break;
> + }
> + if (!vdev)
> + return -1;
> +
> + if (queue_id != VIRTIO_RXQ)
> + return 0;
> +
> + if (async_vhost_driver) {
> + if (!enable) {
> + uint16_t n_pkt;
> + struct rte_mbuf *m_cpl[vdev->pkts_inflight];
> +
> + n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, queue_id,
> + m_cpl, vdev->pkts_inflight);
> + free_pkts(m_cpl, n_pkt);
> + }
> + }
> +
> + return 0;
> +}
> +
> /*
> * These callback allow devices to be added to the data core when configuration
> * has been fully complete.
> @@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
> {
> .new_device = new_device,
> .destroy_device = destroy_device,
> + .vring_state_changed = vring_state_changed,
> };
>
> /*
> diff --git a/examples/vhost/main.h b/examples/vhost/main.h
> index 0ccdce4b4a..e7b1ac60a6 100644
> --- a/examples/vhost/main.h
> +++ b/examples/vhost/main.h
> @@ -51,6 +51,7 @@ struct vhost_dev {
> uint64_t features;
> size_t hdr_len;
> uint16_t nr_vrings;
> + uint16_t pkts_inflight;
> struct rte_vhost_memory *mem;
> struct device_statistics stats;
> TAILQ_ENTRY(vhost_dev) global_vdev_entry;
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f5ad..041f40cf04 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -193,4 +193,26 @@ __rte_experimental
> uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count);
>
> +/**
> + * This function checks async completion status and empties all packets
> + * for a specific vhost device queue. Packets which are inflight will
> + * be returned in an array.
> + *
> + * @note This function does not perform any locking
> + *
> + * @param vid
> + * id of vhost device to enqueue data
> + * @param queue_id
> + * queue id to enqueue data
> + * @param pkts
> + * blank array to get return packet pointer
> + * @param count
> + * size of the packet array
> + * @return
> + * num of packets returned
> + */
> +__rte_experimental
> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count);
> +
> #endif /* _RTE_VHOST_ASYNC_H_ */
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 9103a23cd4..f480f188af 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -79,4 +79,7 @@ EXPERIMENTAL {
>
> # added in 21.05
> rte_vhost_get_negotiated_protocol_features;
> +
> + # added in 21.08
> + rte_vhost_drain_queue_thread_unsafe;
> };
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8da8a86a10..793510974a 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> } while (nr_left > 0);
> }
>
> -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> +static __rte_always_inline uint16_t
> +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count)
> {
> - struct virtio_net *dev = get_device(vid);
> struct vhost_virtqueue *vq;
> uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> uint16_t start_idx, pkts_idx, vq_size;
> struct async_inflight_info *pkts_info;
> uint16_t from, i;
>
> - if (!dev)
> - return 0;
> -
> - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> vq = dev->virtqueue[queue_id];
>
> - if (unlikely(!vq->async_registered)) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> - rte_spinlock_lock(&vq->access_lock);
> -
> pkts_idx = vq->async_pkts_idx % vq->size;
> pkts_info = vq->async_pkts_info;
> vq_size = vq->size;
> @@ -2119,14 +2101,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> vq_size, vq->async_pkts_inflight_n);
>
> if (count > vq->async_last_pkts_n)
> - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> + n_pkts_cpl = vq->async_ops.check_completed_copies(dev->vid,
> queue_id, 0, count - vq->async_last_pkts_n);
> n_pkts_cpl += vq->async_last_pkts_n;
>
> n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> if (unlikely(n_pkts_put == 0)) {
> vq->async_last_pkts_n = n_pkts_cpl;
> - goto done;
> + return 0;
> }
>
> if (vq_is_packed(dev)) {
> @@ -2165,12 +2147,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> vq->last_async_desc_idx_split += n_descs;
> }
>
> -done:
> + return n_pkts_put;
> +}
> +
> +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts_put = 0;
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + rte_spinlock_lock(&vq->access_lock);
> +
> + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
> +
> rte_spinlock_unlock(&vq->access_lock);
>
> return n_pkts_put;
> }
>
> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts = count;
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + while (count)
> + count -= vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
> +
> + return n_pkts;
> +}
> +
> static __rte_always_inline uint32_t
> virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint32_t count,
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in async vhost
2021-06-07 13:46 ` Maxime Coquelin
@ 2021-06-08 5:26 ` Jiang, Cheng1
0 siblings, 0 replies; 70+ messages in thread
From: Jiang, Cheng1 @ 2021-06-08 5:26 UTC (permalink / raw)
To: Maxime Coquelin, maxime.coquelin, Xia, Chenbo
Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Maxime,
> -----Original Message-----
> From: Maxime Coquelin <mcoqueli@redhat.com>
> Sent: Monday, June 7, 2021 9:46 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: Re: [PATCH 1/2] vhost: add unsafe API to drain pkts in async vhost
>
>
>
> On 6/2/21 6:28 AM, Cheng Jiang wrote:
> > Applications need to stop DMA transfers and finish all the in-flight
> > pkts when in VM memory hot-plug case and async vhost is used. This
> > patch is to provide an unsafe API to drain in-flight pkts which are
> > submitted to DMA engine in vhost async data path. And enable it in
> > vhost example.
> >
> > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > ---
> > examples/vhost/main.c | 48 +++++++++++++++++++-
> > examples/vhost/main.h | 1 +
> > lib/vhost/rte_vhost_async.h | 22 +++++++++
> > lib/vhost/version.map | 3 ++
> > lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> > 5 files changed, 139 insertions(+), 25 deletions(-)
>
> Please split example and lib changes in dedicated patches.
Sure, it will be fixed in the next version.
Thanks,
Cheng
>
> >
> > diff --git a/examples/vhost/main.c b/examples/vhost/main.c index
> > d2179eadb9..70bb67c7f8 100644
> > --- a/examples/vhost/main.c
> > +++ b/examples/vhost/main.c
> > @@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
> >
> > complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
> > VIRTIO_RXQ, p_cpl,
> MAX_PKT_BURST);
> > - if (complete_count)
> > + if (complete_count) {
> > free_pkts(p_cpl, complete_count);
> > + __atomic_sub_fetch(&vdev->pkts_inflight, complete_count,
> __ATOMIC_SEQ_CST);
> > + }
> > +
> > }
> >
> > static __rte_always_inline void
> > @@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
> > complete_async_pkts(vdev);
> > ret = rte_vhost_submit_enqueue_burst(vdev->vid,
> VIRTIO_RXQ,
> > m, nr_xmit, m_cpu_cpl,
> &cpu_cpl_nr);
> > + __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr,
> > +__ATOMIC_SEQ_CST);
> >
> > if (cpu_cpl_nr)
> > free_pkts(m_cpu_cpl, cpu_cpl_nr);
> > @@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
> > enqueue_count = rte_vhost_submit_enqueue_burst(vdev-
> >vid,
> > VIRTIO_RXQ, pkts, rx_count,
> > m_cpu_cpl, &cpu_cpl_nr);
> > + __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count
> - cpu_cpl_nr,
> > + __ATOMIC_SEQ_CST);
> > +
> > if (cpu_cpl_nr)
> > free_pkts(m_cpu_cpl, cpu_cpl_nr);
> >
> > @@ -1397,8 +1404,15 @@ destroy_device(int vid)
> > "(%d) device has been removed from data core\n",
> > vdev->vid);
> >
> > - if (async_vhost_driver)
> > + if (async_vhost_driver) {
> > + uint16_t n_pkt = 0;
> > + struct rte_mbuf *m_cpl[vdev->pkts_inflight];
> > + n_pkt = rte_vhost_drain_queue_thread_unsafe(vid,
> VIRTIO_RXQ, m_cpl,
> > + vdev->pkts_inflight);
> > +
> > + free_pkts(m_cpl, n_pkt);
> > rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
> > + }
> >
> > rte_free(vdev);
> > }
> > @@ -1487,6 +1501,35 @@ new_device(int vid)
> > return 0;
> > }
> >
> > +static int
> > +vring_state_changed(int vid, uint16_t queue_id, int enable) {
> > + struct vhost_dev *vdev = NULL;
> > +
> > + TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
> > + if (vdev->vid == vid)
> > + break;
> > + }
> > + if (!vdev)
> > + return -1;
> > +
> > + if (queue_id != VIRTIO_RXQ)
> > + return 0;
> > +
> > + if (async_vhost_driver) {
> > + if (!enable) {
> > + uint16_t n_pkt;
> > + struct rte_mbuf *m_cpl[vdev->pkts_inflight];
> > +
> > + n_pkt = rte_vhost_drain_queue_thread_unsafe(vid,
> queue_id,
> > + m_cpl, vdev-
> >pkts_inflight);
> > + free_pkts(m_cpl, n_pkt);
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > /*
> > * These callback allow devices to be added to the data core when
> configuration
> > * has been fully complete.
> > @@ -1495,6 +1538,7 @@ static const struct vhost_device_ops
> > virtio_net_device_ops = {
> > .new_device = new_device,
> > .destroy_device = destroy_device,
> > + .vring_state_changed = vring_state_changed,
> > };
> >
> > /*
> > diff --git a/examples/vhost/main.h b/examples/vhost/main.h index
> > 0ccdce4b4a..e7b1ac60a6 100644
> > --- a/examples/vhost/main.h
> > +++ b/examples/vhost/main.h
> > @@ -51,6 +51,7 @@ struct vhost_dev {
> > uint64_t features;
> > size_t hdr_len;
> > uint16_t nr_vrings;
> > + uint16_t pkts_inflight;
> > struct rte_vhost_memory *mem;
> > struct device_statistics stats;
> > TAILQ_ENTRY(vhost_dev) global_vdev_entry; diff --git
> > a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h index
> > 6faa31f5ad..041f40cf04 100644
> > --- a/lib/vhost/rte_vhost_async.h
> > +++ b/lib/vhost/rte_vhost_async.h
> > @@ -193,4 +193,26 @@ __rte_experimental uint16_t
> > rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint16_t count);
> >
> > +/**
> > + * This function checks async completion status and empties all packets
> > + * for a specific vhost device queue. Packets which are inflight will
> > + * be returned in an array.
> > + *
> > + * @note This function does not perform any locking
> > + *
> > + * @param vid
> > + * id of vhost device to enqueue data
> > + * @param queue_id
> > + * queue id to enqueue data
> > + * @param pkts
> > + * blank array to get return packet pointer
> > + * @param count
> > + * size of the packet array
> > + * @return
> > + * num of packets returned
> > + */
> > +__rte_experimental
> > +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> > + struct rte_mbuf **pkts, uint16_t count);
> > +
> > #endif /* _RTE_VHOST_ASYNC_H_ */
> > diff --git a/lib/vhost/version.map b/lib/vhost/version.map index
> > 9103a23cd4..f480f188af 100644
> > --- a/lib/vhost/version.map
> > +++ b/lib/vhost/version.map
> > @@ -79,4 +79,7 @@ EXPERIMENTAL {
> >
> > # added in 21.05
> > rte_vhost_get_negotiated_protocol_features;
> > +
> > + # added in 21.08
> > + rte_vhost_drain_queue_thread_unsafe;
> > };
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > 8da8a86a10..793510974a 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct
> vhost_virtqueue *vq,
> > } while (nr_left > 0);
> > }
> >
> > -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > +static __rte_always_inline uint16_t
> > +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t
> > +queue_id,
> > struct rte_mbuf **pkts, uint16_t count) {
> > - struct virtio_net *dev = get_device(vid);
> > struct vhost_virtqueue *vq;
> > uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> > uint16_t start_idx, pkts_idx, vq_size;
> > struct async_inflight_info *pkts_info;
> > uint16_t from, i;
> >
> > - if (!dev)
> > - return 0;
> > -
> > - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > vq = dev->virtqueue[queue_id];
> >
> > - if (unlikely(!vq->async_registered)) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > - rte_spinlock_lock(&vq->access_lock);
> > -
> > pkts_idx = vq->async_pkts_idx % vq->size;
> > pkts_info = vq->async_pkts_info;
> > vq_size = vq->size;
> > @@ -2119,14 +2101,14 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > vq_size, vq->async_pkts_inflight_n);
> >
> > if (count > vq->async_last_pkts_n)
> > - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> > + n_pkts_cpl = vq->async_ops.check_completed_copies(dev-
> >vid,
> > queue_id, 0, count - vq->async_last_pkts_n);
> > n_pkts_cpl += vq->async_last_pkts_n;
> >
> > n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> > if (unlikely(n_pkts_put == 0)) {
> > vq->async_last_pkts_n = n_pkts_cpl;
> > - goto done;
> > + return 0;
> > }
> >
> > if (vq_is_packed(dev)) {
> > @@ -2165,12 +2147,74 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > vq->last_async_desc_idx_split += n_descs;
> > }
> >
> > -done:
> > + return n_pkts_put;
> > +}
> > +
> > +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > + struct rte_mbuf **pkts, uint16_t count) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts_put = 0;
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + rte_spinlock_lock(&vq->access_lock);
> > +
> > + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts,
> > +count);
> > +
> > rte_spinlock_unlock(&vq->access_lock);
> >
> > return n_pkts_put;
> > }
> >
> > +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> > + struct rte_mbuf **pkts, uint16_t count) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts = count;
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + while (count)
> > + count -= vhost_poll_enqueue_completed(dev, queue_id,
> pkts, count);
> > +
> > + return n_pkts;
> > +}
> > +
> > static __rte_always_inline uint32_t
> > virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint32_t count,
> >
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v2 0/3] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 2/2] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-06-15 14:15 ` Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
` (2 more replies)
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
` (5 subsequent siblings)
8 siblings, 3 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-15 14:15 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain in-flight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v2:
* changed the patch structure
Cheng Jiang (2):
vhost: add unsafe API to drain pkts in async vhost
examples/vhost: handle memory hotplug for async vhost
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
examples/vhost/main.c | 48 +++++++++++++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 3 ++
lib/vhost/vhost_user.c | 9 ++++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
6 files changed, 148 insertions(+), 25 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
@ 2021-06-15 14:15 ` Cheng Jiang
2021-07-05 14:58 ` Pai G, Sunil
2021-07-06 14:08 ` Maxime Coquelin
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 2/3] examples/vhost: handle memory hotplug for " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 3/3] vhost: " Cheng Jiang
2 siblings, 2 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-15 14:15 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
3 files changed, 92 insertions(+), 23 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..041f40cf04 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,26 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and empty all pakcets
+ * for a specific vhost device queue. Packets which are inflight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..f480f188af 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8da8a86a10..793510974a 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2119,14 +2101,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ n_pkts_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2165,12 +2147,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts = count;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ while (count)
+ count -= vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v2 2/3] examples/vhost: handle memory hotplug for async vhost
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
@ 2021-06-15 14:15 ` Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 3/3] vhost: " Cheng Jiang
2 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-15 14:15 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight pkt count.
2. add vring_state_changed() callback.
3. add inflight pkt drain process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 48 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..70bb67c7f8 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,15 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+ n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
+ vdev->pkts_inflight);
+
+ free_pkts(m_cpl, n_pkt);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1501,35 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ n_pkt = rte_vhost_drain_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v2 3/3] vhost: handle memory hotplug for async vhost
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 2/3] examples/vhost: handle memory hotplug for " Cheng Jiang
@ 2021-06-15 14:15 ` Cheng Jiang
2 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-06-15 14:15 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 8f0eba6412..6800e60c2d 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1223,6 +1223,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the backend application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
@ 2021-07-05 14:58 ` Pai G, Sunil
2021-07-07 14:02 ` Jiang, Cheng1
2021-07-06 14:08 ` Maxime Coquelin
1 sibling, 1 reply; 70+ messages in thread
From: Pai G, Sunil @ 2021-07-05 14:58 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin, Xia, Chenbo
Cc: dev, Hu, Jiayu, Yang, YvonneX, Jiang, Cheng1
Hi Cheng,
Comments inline.
<snipped>
> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> + struct rte_mbuf **pkts, uint16_t count) {
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts = count;
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx
> %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + while (count)
> + count -= vhost_poll_enqueue_completed(dev, queue_id,
> pkts, count);
I think the drain API here assumes there is per virtqueue assignment of DMA device which need not be true.
If there are multiple DMA devices per virtqueue , the application would need a mechanism to change the device id per call to the drain API.
So, it's probably better to just call vhost_poll_enqueue_completed here and return to the application, and have the loop in the application instead?
> +
> + return n_pkts;
> +}
> +
<snipped>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-07-05 14:58 ` Pai G, Sunil
@ 2021-07-06 14:08 ` Maxime Coquelin
2021-07-08 13:46 ` Hu, Jiayu
1 sibling, 1 reply; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-06 14:08 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 6/15/21 4:15 PM, Cheng Jiang wrote:
> Applications need to stop DMA transfers and finish all the in-flight
> pkts when in VM memory hot-plug case and async vhost is used. This
> patch is to provide an unsafe API to drain in-flight pkts which are
> submitted to DMA engine in vhost async data path.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> lib/vhost/rte_vhost_async.h | 22 +++++++++
> lib/vhost/version.map | 3 ++
> lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> 3 files changed, 92 insertions(+), 23 deletions(-)
>
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f5ad..041f40cf04 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -193,4 +193,26 @@ __rte_experimental
> uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count);
>
> +/**
> + * This function checks async completion status and empty all pakcets
> + * for a specific vhost device queue. Packets which are inflight will
> + * be returned in an array.
> + *
> + * @note This function does not perform any locking
> + *
> + * @param vid
> + * id of vhost device to enqueue data
> + * @param queue_id
> + * queue id to enqueue data
> + * @param pkts
> + * blank array to get return packet pointer
> + * @param count
> + * size of the packet array
> + * @return
> + * num of packets returned
> + */
> +__rte_experimental
> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count);
> +
> #endif /* _RTE_VHOST_ASYNC_H_ */
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 9103a23cd4..f480f188af 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -79,4 +79,7 @@ EXPERIMENTAL {
>
> # added in 21.05
> rte_vhost_get_negotiated_protocol_features;
> +
> + # added in 21.08
> + rte_vhost_drain_queue_thread_unsafe;
> };
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8da8a86a10..793510974a 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> } while (nr_left > 0);
> }
>
> -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> +static __rte_always_inline uint16_t
> +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count)
> {
> - struct virtio_net *dev = get_device(vid);
> struct vhost_virtqueue *vq;
> uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> uint16_t start_idx, pkts_idx, vq_size;
> struct async_inflight_info *pkts_info;
> uint16_t from, i;
>
> - if (!dev)
> - return 0;
> -
> - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> vq = dev->virtqueue[queue_id];
>
> - if (unlikely(!vq->async_registered)) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> - rte_spinlock_lock(&vq->access_lock);
> -
> pkts_idx = vq->async_pkts_idx % vq->size;
> pkts_info = vq->async_pkts_info;
> vq_size = vq->size;
> @@ -2119,14 +2101,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> vq_size, vq->async_pkts_inflight_n);
>
> if (count > vq->async_last_pkts_n)
> - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> + n_pkts_cpl = vq->async_ops.check_completed_copies(dev->vid,
> queue_id, 0, count - vq->async_last_pkts_n);
> n_pkts_cpl += vq->async_last_pkts_n;
>
> n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> if (unlikely(n_pkts_put == 0)) {
> vq->async_last_pkts_n = n_pkts_cpl;
> - goto done;
> + return 0;
> }
>
> if (vq_is_packed(dev)) {
> @@ -2165,12 +2147,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> vq->last_async_desc_idx_split += n_descs;
> }
>
> -done:
> + return n_pkts_put;
> +}
> +
> +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts_put = 0;
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + rte_spinlock_lock(&vq->access_lock);
> +
> + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
> +
> rte_spinlock_unlock(&vq->access_lock);
>
> return n_pkts_put;
> }
>
> +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts = count;
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + while (count)
> + count -= vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
I think we may want to improve the sync_ops so that
.check_completed_copies() returns an int. If for some reason the DMA
driver callback fails, we would poll forever.
Looking more into the code, I see that ioat_check_completed_copies_cb()
can return -1 (whereas it should return a uint32_t). It would lead to
undefined behaviour if the failure would happen. The IOAT driver needs
to be fixed, and also the callback prototype and its handling.
> +
> + return n_pkts;
> +}
> +
> static __rte_always_inline uint32_t
> virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint32_t count,
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-07-05 14:58 ` Pai G, Sunil
@ 2021-07-07 14:02 ` Jiang, Cheng1
2021-07-08 7:15 ` Pai G, Sunil
0 siblings, 1 reply; 70+ messages in thread
From: Jiang, Cheng1 @ 2021-07-07 14:02 UTC (permalink / raw)
To: Pai G, Sunil, maxime.coquelin, Xia, Chenbo; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Sunil,
Replies are inline.
> -----Original Message-----
> From: Pai G, Sunil <sunil.pai.g@intel.com>
> Sent: Monday, July 5, 2021 10:58 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in
> async vhost
>
> Hi Cheng,
>
> Comments inline.
>
> <snipped>
>
> > +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> > queue_id,
> > + struct rte_mbuf **pkts, uint16_t count) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts = count;
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx
> > %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> > queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + while (count)
> > + count -= vhost_poll_enqueue_completed(dev, queue_id,
> > pkts, count);
>
> I think the drain API here assumes there is per virtqueue assignment of DMA
> device which need not be true.
> If there are multiple DMA devices per virtqueue , the application would need
> a mechanism to change the device id per call to the drain API.
For now our async vhost didn't have the support for the usage of multiple DMA devices per virtqueue.
It's better that we change all the APIs at once in the future for consistency if you need it.
> So, its probably better to just call vhost_poll_enqueue_completed here and
> return to the application ? and have the loop in the application instead ?
As for this one, I'm not sure why we need to have the loop in the application.
The function of this API is that caller need to drain all the inflight pkts, it should be called only once to get the job done.
Don't you think?
Thanks.
Cheng
>
> > +
> > + return n_pkts;
> > +}
> > +
>
> <snipped>
>
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-07-07 14:02 ` Jiang, Cheng1
@ 2021-07-08 7:15 ` Pai G, Sunil
2021-07-12 6:31 ` Jiang, Cheng1
0 siblings, 1 reply; 70+ messages in thread
From: Pai G, Sunil @ 2021-07-08 7:15 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin, Xia, Chenbo; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Cheng,
Response inline.
<snipped>
> As for this one, I'm not sure why we need have the loop in the application.
> The function of this API is that caller need to drain all the inflight pkts, it
> should be called only once to get the job done.
> Don't you think?
Perhaps yes, but my thought was to provide the application the flexibility to change the DMA device per call to the check_completed_copies callback if it did require it.
>
> Thanks.
> Cheng
>
> >
> > > +
> > > + return n_pkts;
> > > +}
> > > +
> >
> > <snipped>
> >
> >
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-07-06 14:08 ` Maxime Coquelin
@ 2021-07-08 13:46 ` Hu, Jiayu
0 siblings, 0 replies; 70+ messages in thread
From: Hu, Jiayu @ 2021-07-08 13:46 UTC (permalink / raw)
To: Maxime Coquelin, Jiang, Cheng1, Xia, Chenbo; +Cc: dev, Yang, YvonneX
Hi Maxime,
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, July 6, 2021 10:09 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: Re: [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
>
>
>
> On 6/15/21 4:15 PM, Cheng Jiang wrote:
> > Applications need to stop DMA transfers and finish all the in-flight
> > pkts when in VM memory hot-plug case and async vhost is used. This
> > patch is to provide an unsafe API to drain in-flight pkts which are
> > submitted to DMA engine in vhost async data path.
> >
> > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > ---
> > lib/vhost/rte_vhost_async.h | 22 +++++++++
> > lib/vhost/version.map | 3 ++
> > lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> > 3 files changed, 92 insertions(+), 23 deletions(-)
> >
> > diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> > index 6faa31f5ad..041f40cf04 100644
> > --- a/lib/vhost/rte_vhost_async.h
> > +++ b/lib/vhost/rte_vhost_async.h
> > @@ -193,4 +193,26 @@ __rte_experimental uint16_t
> > rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint16_t count);
> >
> > +/**
> > + * This function checks async completion status and empty all pakcets
> > + * for a specific vhost device queue. Packets which are inflight will
> > + * be returned in an array.
> > + *
> > + * @note This function does not perform any locking
> > + *
> > + * @param vid
> > + * id of vhost device to enqueue data
> > + * @param queue_id
> > + * queue id to enqueue data
> > + * @param pkts
> > + * blank array to get return packet pointer
> > + * @param count
> > + * size of the packet array
> > + * @return
> > + * num of packets returned
> > + */
> > +__rte_experimental
> > +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> > + struct rte_mbuf **pkts, uint16_t count);
> > +
> > #endif /* _RTE_VHOST_ASYNC_H_ */
> > diff --git a/lib/vhost/version.map b/lib/vhost/version.map index
> > 9103a23cd4..f480f188af 100644
> > --- a/lib/vhost/version.map
> > +++ b/lib/vhost/version.map
> > @@ -79,4 +79,7 @@ EXPERIMENTAL {
> >
> > # added in 21.05
> > rte_vhost_get_negotiated_protocol_features;
> > +
> > + # added in 21.08
> > + rte_vhost_drain_queue_thread_unsafe;
> > };
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > 8da8a86a10..793510974a 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -2082,36 +2082,18 @@ write_back_completed_descs_packed(struct
> vhost_virtqueue *vq,
> > } while (nr_left > 0);
> > }
> >
> > -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > +static __rte_always_inline uint16_t
> > +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t
> > +queue_id,
> > struct rte_mbuf **pkts, uint16_t count) {
> > - struct virtio_net *dev = get_device(vid);
> > struct vhost_virtqueue *vq;
> > uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> > uint16_t start_idx, pkts_idx, vq_size;
> > struct async_inflight_info *pkts_info;
> > uint16_t from, i;
> >
> > - if (!dev)
> > - return 0;
> > -
> > - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > vq = dev->virtqueue[queue_id];
> >
> > - if (unlikely(!vq->async_registered)) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > - rte_spinlock_lock(&vq->access_lock);
> > -
> > pkts_idx = vq->async_pkts_idx % vq->size;
> > pkts_info = vq->async_pkts_info;
> > vq_size = vq->size;
> > @@ -2119,14 +2101,14 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > vq_size, vq->async_pkts_inflight_n);
> >
> > if (count > vq->async_last_pkts_n)
> > - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> > + n_pkts_cpl = vq->async_ops.check_completed_copies(dev-
> >vid,
> > queue_id, 0, count - vq->async_last_pkts_n);
> > n_pkts_cpl += vq->async_last_pkts_n;
> >
> > n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> > if (unlikely(n_pkts_put == 0)) {
> > vq->async_last_pkts_n = n_pkts_cpl;
> > - goto done;
> > + return 0;
> > }
> >
> > if (vq_is_packed(dev)) {
> > @@ -2165,12 +2147,74 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > vq->last_async_desc_idx_split += n_descs;
> > }
> >
> > -done:
> > + return n_pkts_put;
> > +}
> > +
> > +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > + struct rte_mbuf **pkts, uint16_t count) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts_put = 0;
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + rte_spinlock_lock(&vq->access_lock);
> > +
> > + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts,
> > +count);
> > +
> > rte_spinlock_unlock(&vq->access_lock);
> >
> > return n_pkts_put;
> > }
> >
> > +uint16_t rte_vhost_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> > + struct rte_mbuf **pkts, uint16_t count) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts = count;
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + while (count)
> > + count -= vhost_poll_enqueue_completed(dev, queue_id, pkts,
> count);
>
> I think we may want to improve the sync_ops so that
> .check_completed_copies() returns an int. If for some reason the DMA driver
> callback fails, we would poll forever.
Agree, it makes sense to change the return type of .check_completed_copies() to int
to report callback failure case.
>
> Looking more into the code, I see that ioat_check_completed_copies_cb() an
> return -1 (whereas it should return an unint32_t). It would lead to undefined
> behaviour if the failure would happen. The IOAT driver needs to be fixed,
> and also the callback prototype and its handling.
Current async design only handles .transfer_data() failure, but requires
.check_completed_copies() to handle DMA copy failure. That is,
.check_completed_copies() always reports all copy success to vhost,
even if a DMA copy fails. And it can fall back to SW copy for the failed
DMA copies.
Another choice is to make vhost handle DMA copy failure. Although the IOAT
driver already supports to report failed copies, dmadev is under discussion.
I am not sure if the interface will change after we have dmadev. So maybe
it's better to leave it open until we have dmadev. How do you think?
Thanks,
Jiayu
>
> > +
> > + return n_pkts;
> > +}
> > +
> > static __rte_always_inline uint32_t
> > virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint32_t count,
> >
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in async vhost
2021-07-08 7:15 ` Pai G, Sunil
@ 2021-07-12 6:31 ` Jiang, Cheng1
0 siblings, 0 replies; 70+ messages in thread
From: Jiang, Cheng1 @ 2021-07-12 6:31 UTC (permalink / raw)
To: Pai G, Sunil, maxime.coquelin, Xia, Chenbo; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi,
> -----Original Message-----
> From: Pai G, Sunil <sunil.pai.g@intel.com>
> Sent: Thursday, July 8, 2021 3:15 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in
> async vhost
>
> Hi Cheng,
>
> Response inline.
>
> <snipped>
>
> > As for this one, I'm not sure why we need have the loop in the application.
> > The function of this API is that caller need to drain all the inflight
> > pkts, it should be called only once to get the job done.
> > Don't you think?
>
> Perhaps yes, but my thought was to provide application the flexibility to
> change the DMA device per call to check_completed_copies callback if it did
> require it.
Got your point.
So you think this API should be defined as the unsafe version of poll completed API, right?
Thanks,
Cheng
>
> >
> > Thanks.
> > Cheng
> >
> > >
> > > > +
> > > > + return n_pkts;
> > > > +}
> > > > +
> > >
> > > <snipped>
> > >
> > >
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 0/5] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 preceding siblings ...)
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type Cheng Jiang
` (4 more replies)
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (4 subsequent siblings)
8 siblings, 5 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain in-flight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (4):
vhost: fix async vhost ops return type
vhost: add unsafe API to drain pkts in async vhost
examples/vhost: handle memory hotplug for async vhost
doc: update doc for try drain API in vhost lib
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 4 +-
examples/vhost/main.c | 48 +++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 28 ++++-
lib/vhost/version.map | 3 +
lib/vhost/vhost_user.c | 9 ++
lib/vhost/virtio_net.c | 146 ++++++++++++++++++++-----
10 files changed, 215 insertions(+), 38 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
` (3 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
The async vhost ops callback should return -1 when there is something
wrong in the callback, so the return type should be changed into
int32_t. The issue in the vhost example is also fixed in this patch.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 4 +--
lib/vhost/rte_vhost_async.h | 4 +--
lib/vhost/virtio_net.c | 58 ++++++++++++++++++++++++++++++++-----
4 files changed, 56 insertions(+), 14 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..bc81cd0caa 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
* @return
* number of descs processed
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
* @return
* number of async descs completed
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..8156796a46 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_enq;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_enq = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_enq;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2000,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2124,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_poll;
if (!dev)
return 0;
@@ -2118,9 +2152,17 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_poll = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_poll >= 0) {
+ n_pkts_cpl = n_poll;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 2/5] vhost: add unsafe API to drain pkts in async vhost
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 3/5] vhost: handle memory hotplug for " Cheng Jiang
` (2 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 24 ++++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
3 files changed, 94 insertions(+), 23 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index bc81cd0caa..fd622631b2 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,28 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and empties all packets
+ * for a specific vhost device queue. Packets which are inflight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @param times
+ * max number of poll attempts
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..b8fc8770dd 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_try_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8156796a46..9f541679b9 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2115,10 +2115,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2126,26 +2126,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_poll;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2153,7 +2135,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_poll = vq->async_ops.check_completed_copies(vid,
+ n_poll = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_poll >= 0) {
n_pkts_cpl = n_poll;
@@ -2168,7 +2150,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2207,12 +2189,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ while ((n_cpl < count) && times--)
+ n_cpl += vhost_poll_enqueue_completed(dev, queue_id, pkts + n_cpl, count);
+
+ return n_cpl;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 3/5] vhost: handle memory hotplug for async vhost
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 4/5] examples/vhost: " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 031c578e54..39e8432d1c 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1275,6 +1275,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the backend application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
` (2 preceding siblings ...)
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight pkt count.
2. add vring_state_changed() callback.
3. add inflight pkt drain process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 48 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..9014c999be 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,15 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
+ vdev->pkts_inflight, 2);
+
+ free_pkts(m_cpl, n_pkt);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1501,35 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight, 2);
+ free_pkts(m_cpl, n_pkt);
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v3 5/5] doc: update doc for try drain API in vhost lib
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
` (3 preceding siblings ...)
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-14 9:01 ` Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-14 9:01 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
update the program guide and release notes for try drain API in vhost
lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 +++++
doc/guides/rel_notes/release_21_08.rst | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index d18fb98910..85aabc4a75 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -281,6 +281,11 @@ The following is an overview of some key Vhost API functions:
Poll enqueue completion status from async data path. Completed packets
are returned to applications through ``pkts``.
+* ``rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id, **pkts, count, times)``
+
+ Try to drain in-flight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index a6ecfdf3ce..d1e5df2003 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -55,6 +55,11 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Added try drain API in vhost library.**
+
+ Added an API which can try to drain the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 preceding siblings ...)
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
` (4 more replies)
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 subsequent siblings)
8 siblings, 5 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain in-flight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v4:
* rebased on the latest code
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (4):
vhost: fix async vhost ops return type
vhost: add unsafe API to drain pkts in async vhost
examples/vhost: handle memory hotplug for async vhost
doc: update doc for try drain API in vhost lib
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 4 +-
examples/vhost/main.c | 48 +++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 28 ++++-
lib/vhost/version.map | 3 +
lib/vhost/vhost_user.c | 9 ++
lib/vhost/virtio_net.c | 146 ++++++++++++++++++++-----
10 files changed, 215 insertions(+), 38 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
2021-07-16 5:36 ` Xia, Chenbo
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
` (3 subsequent siblings)
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
The async vhost ops callback should return -1 when there is something
wrong in the callback, so the return type should be changed into
int32_t. The issue in the vhost example is also fixed in this patch.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 4 +--
lib/vhost/rte_vhost_async.h | 4 +--
lib/vhost/virtio_net.c | 58 ++++++++++++++++++++++++++++++++-----
4 files changed, 56 insertions(+), 14 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..bc81cd0caa 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
* @return
* number of descs processed
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
* @return
* number of async descs completed
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..8156796a46 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_enq;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_enq = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_enq;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2000,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_enq >= 0) {
+ n_pkts = n_enq;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2124,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_poll;
if (!dev)
return 0;
@@ -2118,9 +2152,17 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_poll = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_poll >= 0) {
+ n_pkts_cpl = n_poll;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
2021-07-16 8:56 ` Xia, Chenbo
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 3/5] vhost: handle memory hotplug for " Cheng Jiang
` (2 subsequent siblings)
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 24 ++++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
3 files changed, 94 insertions(+), 23 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index bc81cd0caa..fd622631b2 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,28 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and empties all packets
+ * for a specific vhost device queue. Packets which are inflight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @param times
+ * max number of poll attempts
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..b8fc8770dd 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_try_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8156796a46..9f541679b9 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2115,10 +2115,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2126,26 +2126,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_poll;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2153,7 +2135,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_poll = vq->async_ops.check_completed_copies(vid,
+ n_poll = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_poll >= 0) {
n_pkts_cpl = n_poll;
@@ -2168,7 +2150,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2207,12 +2189,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ while ((n_cpl < count) && times--)
+ n_cpl += vhost_poll_enqueue_completed(dev, queue_id, pkts + n_cpl, count);
+
+ return n_cpl;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 3/5] vhost: handle memory hotplug for async vhost
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 4/5] examples/vhost: " Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 031c578e54..39e8432d1c 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1275,6 +1275,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the backend application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 preceding siblings ...)
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight pkt count.
2. add vring_state_changed() callback.
3. add inflight pkt drain process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 48 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..9014c999be 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,15 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
+ vdev->pkts_inflight, 2);
+
+ free_pkts(m_cpl, n_pkt);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1501,35 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight, 2);
+ free_pkts(m_cpl, n_pkt);
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v4 5/5] doc: update doc for try drain API in vhost lib
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 preceding siblings ...)
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-16 2:59 ` Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 2:59 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
update the program guide and release notes for try drain API in vhost
lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 +++++
doc/guides/rel_notes/release_21_08.rst | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index d18fb98910..85aabc4a75 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -281,6 +281,11 @@ The following is an overview of some key Vhost API functions:
Poll enqueue completion status from async data path. Completed packets
are returned to applications through ``pkts``.
+* ``rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id, **pkts, count, times)``
+
+ Try to drain in-flight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 6a902ef9ac..c38e358cf9 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -117,6 +117,11 @@ New Features
The experimental PMD power management API now supports managing
multiple Ethernet Rx queues per lcore.
+* **Added try drain API in vhost library.**
+
+ Added an API which can try to drain the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-16 5:36 ` Xia, Chenbo
2021-07-16 5:58 ` Jiang, Cheng1
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-16 5:36 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Cheng,
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Friday, July 16, 2021 10:59 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v4 1/5] vhost: fix async vhost ops return type
>
> The async vhost ops callback should return -1 when there are something
Ops callback -> callback ops
Since the return value is redefined, let's update the ops description of struct
rte_vhost_async_channel_ops. And I suggest returning a negative value on error,
rather than only -1.
> wrong in the callback, so the return type should be changed into
> int32_t. The issue in vhost example is also fixed in this patch.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> examples/vhost/ioat.c | 4 +--
> examples/vhost/ioat.h | 4 +--
> lib/vhost/rte_vhost_async.h | 4 +--
> lib/vhost/virtio_net.c | 58 ++++++++++++++++++++++++++++++++-----
> 4 files changed, 56 insertions(+), 14 deletions(-)
>
> diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
> index 2a2c2d7202..457f8171f0 100644
> --- a/examples/vhost/ioat.c
> +++ b/examples/vhost/ioat.c
> @@ -122,7 +122,7 @@ open_ioat(const char *value)
> return ret;
> }
>
> -uint32_t
> +int32_t
> ioat_transfer_data_cb(int vid, uint16_t queue_id,
> struct rte_vhost_async_desc *descs,
> struct rte_vhost_async_status *opaque_data, uint16_t count)
> @@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> return i_desc;
> }
>
> -uint32_t
> +int32_t
> ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
> struct rte_vhost_async_status *opaque_data,
> uint16_t max_packets)
> diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
> index 1aa28ed6a3..b57b5645b0 100644
> --- a/examples/vhost/ioat.h
> +++ b/examples/vhost/ioat.h
> @@ -27,12 +27,12 @@ struct dma_for_vhost {
> #ifdef RTE_RAW_IOAT
> int open_ioat(const char *value);
>
> -uint32_t
> +int32_t
> ioat_transfer_data_cb(int vid, uint16_t queue_id,
> struct rte_vhost_async_desc *descs,
> struct rte_vhost_async_status *opaque_data, uint16_t count);
>
> -uint32_t
> +int32_t
> ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
> struct rte_vhost_async_status *opaque_data,
> uint16_t max_packets);
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f5ad..bc81cd0caa 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
> * @return
> * number of descs processed
> */
> - uint32_t (*transfer_data)(int vid, uint16_t queue_id,
> + int32_t (*transfer_data)(int vid, uint16_t queue_id,
> struct rte_vhost_async_desc *descs,
> struct rte_vhost_async_status *opaque_data,
> uint16_t count);
> @@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
> * @return
> * number of async descs completed
> */
> - uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
> + int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
> struct rte_vhost_async_status *opaque_data,
> uint16_t max_packets);
> };
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index b93482587c..8156796a46 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> struct async_inflight_info *pkts_info = vq->async_pkts_info;
> uint32_t n_pkts = 0, pkt_err = 0;
> uint32_t num_async_pkts = 0, num_done_pkts = 0;
> + int32_t n_enq;
> struct {
> uint16_t pkt_idx;
> uint16_t last_avail_idx;
> @@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
> ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> BUF_VECTOR_MAX))) {
> - n_pkts = vq->async_ops.transfer_data(dev->vid,
> + n_enq = vq->async_ops.transfer_data(dev->vid,
> queue_id, tdes, 0, pkt_burst_idx);
> + if (n_enq >= 0) {
> + n_pkts = n_enq;
> + } else {
> + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for
> queue id %d.\n",
You can't assume the error is caused by wrong opaque data because of different
implementation of the callback.
It's better to replace 'n_enq' with 'n_xfer' as we use the name 'transfer' in
callback definition.
If you agree with above, please also change in other funcs below.
> + dev->vid, __func__, queue_id);
> + n_pkts = 0;
> + }
> +
> iovec_idx = 0;
> it_idx = 0;
>
> @@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> }
>
> if (pkt_burst_idx) {
> - n_pkts = vq->async_ops.transfer_data(dev->vid,
> - queue_id, tdes, 0, pkt_burst_idx);
> + n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0,
> pkt_burst_idx);
> + if (n_enq >= 0) {
> + n_pkts = n_enq;
> + } else {
> + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue
> id %d.\n",
> + dev->vid, __func__, queue_id);
> + n_pkts = 0;
> + }
> +
> vq->async_pkts_inflight_n += n_pkts;
>
> if (unlikely(n_pkts < pkt_burst_idx))
> @@ -1903,6 +1919,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> uint16_t async_descs_idx = 0;
> uint16_t num_buffers;
> uint16_t num_descs;
> + int32_t n_enq;
>
> struct rte_vhost_iov_iter *it_pool = vq->it_pool;
> struct iovec *vec_pool = vq->vec_pool;
> @@ -1983,8 +2000,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net
> *dev,
> */
> if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
> ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX)))
> {
> - n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
> - tdes, 0, pkt_burst_idx);
> + n_enq = vq->async_ops.transfer_data(dev->vid,
> + queue_id, tdes, 0, pkt_burst_idx);
> + if (n_enq >= 0) {
> + n_pkts = n_enq;
> + } else {
> + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for
> queue id %d.\n",
> + dev->vid, __func__, queue_id);
> + n_pkts = 0;
> + }
> +
> iovec_idx = 0;
> it_idx = 0;
> segs_await = 0;
> @@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net
> *dev,
> } while (pkt_idx < count);
>
> if (pkt_burst_idx) {
> - n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0,
> pkt_burst_idx);
> + n_enq = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0,
> pkt_burst_idx);
> + if (n_enq >= 0) {
> + n_pkts = n_enq;
> + } else {
> + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue
> id %d.\n",
> + dev->vid, __func__, queue_id);
> + n_pkts = 0;
> + }
> +
> vq->async_pkts_inflight_n += n_pkts;
>
> if (unlikely(n_pkts < pkt_burst_idx))
> @@ -2091,6 +2124,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> uint16_t start_idx, pkts_idx, vq_size;
> struct async_inflight_info *pkts_info;
> uint16_t from, i;
> + int32_t n_poll;
>
> if (!dev)
> return 0;
> @@ -2118,9 +2152,17 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
> vq_size, vq->async_pkts_inflight_n);
>
> - if (count > vq->async_last_pkts_n)
> - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> + if (count > vq->async_last_pkts_n) {
> + n_poll = vq->async_ops.check_completed_copies(vid,
> queue_id, 0, count - vq->async_last_pkts_n);
The name 'n_poll' is not related with the callback name. Maybe 'n_cpl'?
> + if (n_poll >= 0) {
> + n_pkts_cpl = n_poll;
> + } else {
> + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque data for queue
> id %d.\n",
I suggest using different log for submit and check complete so that it's easier
for users to know what's wrong.
Thanks,
Chenbo
> + dev->vid, __func__, queue_id);
> + n_pkts_cpl = 0;
> + }
> + }
> n_pkts_cpl += vq->async_last_pkts_n;
>
> n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> --
> 2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type
2021-07-16 5:36 ` Xia, Chenbo
@ 2021-07-16 5:58 ` Jiang, Cheng1
0 siblings, 0 replies; 70+ messages in thread
From: Jiang, Cheng1 @ 2021-07-16 5:58 UTC (permalink / raw)
To: Xia, Chenbo, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Chenbo,
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Friday, July 16, 2021 1:37 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: RE: [PATCH v4 1/5] vhost: fix async vhost ops return type
>
> Hi Cheng,
>
> > -----Original Message-----
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Sent: Friday, July 16, 2021 10:59 AM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Subject: [PATCH v4 1/5] vhost: fix async vhost ops return type
> >
> > The async vhost ops callback should return -1 when there are something
>
> Ops callback -> callback ops
>
> Since the return value is redefined. Let's update ops description of struct
> rte_vhost_async_channel_ops. And I suggest return negative value when
> error, rather than only -1.
>
Sure, agreed.
> > wrong in the callback, so the return type should be changed into
> > int32_t. The issue in vhost example is also fixed in this patch.
> >
> > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > ---
> > examples/vhost/ioat.c | 4 +--
> > examples/vhost/ioat.h | 4 +--
> > lib/vhost/rte_vhost_async.h | 4 +--
> > lib/vhost/virtio_net.c | 58 ++++++++++++++++++++++++++++++++-----
> > 4 files changed, 56 insertions(+), 14 deletions(-)
> >
> > diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c index
> > 2a2c2d7202..457f8171f0 100644
> > --- a/examples/vhost/ioat.c
> > +++ b/examples/vhost/ioat.c
> > @@ -122,7 +122,7 @@ open_ioat(const char *value)
> > return ret;
> > }
> >
> > -uint32_t
> > +int32_t
> > ioat_transfer_data_cb(int vid, uint16_t queue_id,
> > struct rte_vhost_async_desc *descs,
> > struct rte_vhost_async_status *opaque_data, uint16_t count)
> @@
> > -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
> > return i_desc;
> > }
> >
> > -uint32_t
> > +int32_t
> > ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
> > struct rte_vhost_async_status *opaque_data,
> > uint16_t max_packets)
> > diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h index
> > 1aa28ed6a3..b57b5645b0 100644
> > --- a/examples/vhost/ioat.h
> > +++ b/examples/vhost/ioat.h
> > @@ -27,12 +27,12 @@ struct dma_for_vhost { #ifdef RTE_RAW_IOAT int
> > open_ioat(const char *value);
> >
> > -uint32_t
> > +int32_t
> > ioat_transfer_data_cb(int vid, uint16_t queue_id,
> > struct rte_vhost_async_desc *descs,
> > struct rte_vhost_async_status *opaque_data, uint16_t
> count);
> >
> > -uint32_t
> > +int32_t
> > ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
> > struct rte_vhost_async_status *opaque_data,
> > uint16_t max_packets);
> > diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> > index 6faa31f5ad..bc81cd0caa 100644
> > --- a/lib/vhost/rte_vhost_async.h
> > +++ b/lib/vhost/rte_vhost_async.h
> > @@ -61,7 +61,7 @@ struct rte_vhost_async_channel_ops {
> > * @return
> > * number of descs processed
> > */
> > - uint32_t (*transfer_data)(int vid, uint16_t queue_id,
> > + int32_t (*transfer_data)(int vid, uint16_t queue_id,
> > struct rte_vhost_async_desc *descs,
> > struct rte_vhost_async_status *opaque_data,
> > uint16_t count);
> > @@ -78,7 +78,7 @@ struct rte_vhost_async_channel_ops {
> > * @return
> > * number of async descs completed
> > */
> > - uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
> > + int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
> > struct rte_vhost_async_status *opaque_data,
> > uint16_t max_packets);
> > };
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > b93482587c..8156796a46 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
> > struct async_inflight_info *pkts_info = vq->async_pkts_info;
> > uint32_t n_pkts = 0, pkt_err = 0;
> > uint32_t num_async_pkts = 0, num_done_pkts = 0;
> > + int32_t n_enq;
> > struct {
> > uint16_t pkt_idx;
> > uint16_t last_avail_idx;
> > @@ -1608,8 +1609,16 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
> > if (unlikely(pkt_burst_idx >=
> VHOST_ASYNC_BATCH_THRESHOLD ||
> > ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> > BUF_VECTOR_MAX))) {
> > - n_pkts = vq->async_ops.transfer_data(dev->vid,
> > + n_enq = vq->async_ops.transfer_data(dev->vid,
> > queue_id, tdes, 0, pkt_burst_idx);
> > + if (n_enq >= 0) {
> > + n_pkts = n_enq;
> > + } else {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: wrong
> opaque data for
> > queue id %d.\n",
>
> You can't assume the error is caused by wrong opaque data because of
> different implementation of the callback.
>
> It's better to replace 'n_enq' with 'n_xfer' as we use the name 'transfer' in
> callback definition.
>
> If you agree with above, please also change in other funcs below.
Sure, agreed. It will be fixed in the next version.
>
> > + dev->vid, __func__, queue_id);
> > + n_pkts = 0;
> > + }
> > +
> > iovec_idx = 0;
> > it_idx = 0;
> >
> > @@ -1632,8 +1641,15 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
> > }
> >
> > if (pkt_burst_idx) {
> > - n_pkts = vq->async_ops.transfer_data(dev->vid,
> > - queue_id, tdes, 0, pkt_burst_idx);
> > + n_enq = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > + if (n_enq >= 0) {
> > + n_pkts = n_enq;
> > + } else {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque
> data for queue
> > id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + n_pkts = 0;
> > + }
> > +
> > vq->async_pkts_inflight_n += n_pkts;
> >
> > if (unlikely(n_pkts < pkt_burst_idx)) @@ -1903,6 +1919,7
> @@
> > virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> > uint16_t async_descs_idx = 0;
> > uint16_t num_buffers;
> > uint16_t num_descs;
> > + int32_t n_enq;
> >
> > struct rte_vhost_iov_iter *it_pool = vq->it_pool;
> > struct iovec *vec_pool = vq->vec_pool; @@ -1983,8 +2000,16 @@
> > virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> > */
> > if (unlikely(pkt_burst_idx >=
> VHOST_ASYNC_BATCH_THRESHOLD ||
> > ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> BUF_VECTOR_MAX))) {
> > - n_pkts = vq->async_ops.transfer_data(dev->vid,
> queue_id,
> > - tdes, 0, pkt_burst_idx);
> > + n_enq = vq->async_ops.transfer_data(dev->vid,
> > + queue_id, tdes, 0, pkt_burst_idx);
> > + if (n_enq >= 0) {
> > + n_pkts = n_enq;
> > + } else {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: wrong
> opaque data for
> > queue id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + n_pkts = 0;
> > + }
> > +
> > iovec_idx = 0;
> > it_idx = 0;
> > segs_await = 0;
> > @@ -2006,7 +2031,15 @@ virtio_dev_rx_async_submit_packed(struct
> > virtio_net *dev,
> > } while (pkt_idx < count);
> >
> > if (pkt_burst_idx) {
> > - n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > + n_enq = vq->async_ops.transfer_data(dev->vid, queue_id,
> tdes, 0,
> > pkt_burst_idx);
> > + if (n_enq >= 0) {
> > + n_pkts = n_enq;
> > + } else {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque
> data for queue
> > id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + n_pkts = 0;
> > + }
> > +
> > vq->async_pkts_inflight_n += n_pkts;
> >
> > if (unlikely(n_pkts < pkt_burst_idx)) @@ -2091,6 +2124,7
> @@
> > uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > uint16_t start_idx, pkts_idx, vq_size;
> > struct async_inflight_info *pkts_info;
> > uint16_t from, i;
> > + int32_t n_poll;
> >
> > if (!dev)
> > return 0;
> > @@ -2118,9 +2152,17 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int
> > vid, uint16_t queue_id,
> > start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
> > vq_size, vq->async_pkts_inflight_n);
> >
> > - if (count > vq->async_last_pkts_n)
> > - n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
> > + if (count > vq->async_last_pkts_n) {
> > + n_poll = vq->async_ops.check_completed_copies(vid,
> > queue_id, 0, count - vq->async_last_pkts_n);
>
> The name 'n_poll' is not related with the callback name. Maybe 'n_cpl'?
>
> > + if (n_poll >= 0) {
> > + n_pkts_cpl = n_poll;
> > + } else {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: wrong opaque
> data for queue
> > id %d.\n",
>
> I suggest using different log for submit and check complete so that it's easier
> for users to know what's wrong.
Agreed, it will be fixed in the next version.
Thanks,
Cheng
>
> Thanks,
> Chenbo
>
> > + dev->vid, __func__, queue_id);
> > + n_pkts_cpl = 0;
> > + }
> > + }
> > n_pkts_cpl += vq->async_last_pkts_n;
> >
> > n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> > --
> > 2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (4 preceding siblings ...)
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
` (4 more replies)
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 subsequent siblings)
8 siblings, 5 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain in-flight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v5:
* added fixes in 'vhost: fix async vhost ops return type'
* improved git log, variable names and logs
v4:
* rebased on the latest code
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (4):
vhost: fix async vhost ops return type
vhost: add unsafe API to drain pkts in async vhost
examples/vhost: handle memory hotplug for async vhost
doc: update doc for try drain API in vhost lib
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 4 +-
examples/vhost/main.c | 48 +++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 32 +++++-
lib/vhost/version.map | 3 +
lib/vhost/vhost_user.c | 9 ++
lib/vhost/virtio_net.c | 149 ++++++++++++++++++++-----
10 files changed, 220 insertions(+), 40 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
` (3 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia
Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable
The async vhost callback ops should return negative value when there
are something wrong in the callback, so the return type should be
changed into int32_t. The issue in vhost example is also fixed.
Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 4 +--
lib/vhost/rte_vhost_async.h | 8 ++---
lib/vhost/virtio_net.c | 61 ++++++++++++++++++++++++++++++++-----
4 files changed, 61 insertions(+), 16 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..e964d83837 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed
+ * number of descs processed, negative value means error
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
* @param max_packets
* max number of packets could be completed
* @return
- * number of async descs completed
+ * number of async descs completed, negative value means error
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..16ae4d9e19 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_cpl;
if (!dev)
return 0;
@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_cpl = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 2/5] vhost: add unsafe API to drain pkts in async vhost
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for " Cheng Jiang
` (2 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 24 ++++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
3 files changed, 94 insertions(+), 23 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index e964d83837..c3de79d1e1 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,28 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and empties all packets
+ * for a specific vhost device queue. Packets which are inflight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @param times
+ * max number of poll attempts
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..b8fc8770dd 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_try_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 16ae4d9e19..c3a1493e0d 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2117,10 +2117,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2128,26 +2128,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_cpl;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2155,7 +2137,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(vid,
+ n_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_cpl >= 0) {
n_pkts_cpl = n_cpl;
@@ -2171,7 +2153,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2210,12 +2192,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ while ((n_cpl < count) && times--)
+ n_cpl += vhost_poll_enqueue_completed(dev, queue_id, pkts + n_cpl, count);
+
+ return n_cpl;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
2021-07-19 5:19 ` Xia, Chenbo
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 4/5] examples/vhost: " Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 031c578e54..39e8432d1c 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1275,6 +1275,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the backend application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 preceding siblings ...)
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight pkt count.
2. add vring_state_changed() callback.
3. add inflight pkt drain process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 48 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..9014c999be 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,15 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, VIRTIO_RXQ, m_cpl,
+ vdev->pkts_inflight, 2);
+
+ free_pkts(m_cpl, n_pkt);
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1501,35 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ n_pkt = rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight, 2);
+ free_pkts(m_cpl, n_pkt);
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1538,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v5 5/5] doc: update doc for try drain API in vhost lib
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 preceding siblings ...)
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-16 7:24 ` Cheng Jiang
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-16 7:24 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
update the program guide and release notes for try drain API in vhost
lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 +++++
doc/guides/rel_notes/release_21_08.rst | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index d18fb98910..85aabc4a75 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -281,6 +281,11 @@ The following is an overview of some key Vhost API functions:
Poll enqueue completion status from async data path. Completed packets
are returned to applications through ``pkts``.
+* ``rte_vhost_try_drain_queue_thread_unsafe(vid, queue_id, **pkts, count, times)``
+
+ Try to drain in-flight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 6a902ef9ac..c38e358cf9 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -117,6 +117,11 @@ New Features
The experimental PMD power management API now supports managing
multiple Ethernet Rx queues per lcore.
+* **Added try drain API in vhost library.**
+
+ Added an API which can try to drain the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
@ 2021-07-16 8:56 ` Xia, Chenbo
2021-07-19 3:28 ` Jiang, Cheng1
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-16 8:56 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Cheng,
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Friday, July 16, 2021 10:59 AM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost
>
> Applications need to stop DMA transfers and finish all the in-flight
> pkts when in VM memory hot-plug case and async vhost is used. This
Pkts -> packets
> patch is to provide an unsafe API to drain in-flight pkts which are
Ditto
> submitted to DMA engine in vhost async data path.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> lib/vhost/rte_vhost_async.h | 24 ++++++++++
> lib/vhost/version.map | 3 ++
> lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> 3 files changed, 94 insertions(+), 23 deletions(-)
>
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index bc81cd0caa..fd622631b2 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -193,4 +193,28 @@ __rte_experimental
> uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count);
>
> +/**
> + * This function checks async completion status and empty all pakcets
Pakcets -> packets
> + * for a specific vhost device queue. Packets which are inflight will
> + * be returned in an array.
> + *
> + * @note This function does not perform any locking
> + *
> + * @param vid
> + * id of vhost device to enqueue data
id -> ID
to drain data?
> + * @param queue_id
> + * queue id to enqueue data
Ditto
> + * @param pkts
> + * blank array to get return packet pointer
Return -> returned
> + * @param count
> + * size of the packet array
> + * @param times
> + * max number of poll attempts
> + * @return
> + * num of packets returned
num -> Number
And please use capital for first character in each line of param description.
> + */
> +__rte_experimental
> +uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
I think 'rte_vhost_drain_queue_thread_unsafe' is better?
> + struct rte_mbuf **pkts, uint16_t count, uint16_t times);
> +
> #endif /* _RTE_VHOST_ASYNC_H_ */
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 9103a23cd4..b8fc8770dd 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -79,4 +79,7 @@ EXPERIMENTAL {
>
> # added in 21.05
> rte_vhost_get_negotiated_protocol_features;
> +
> + # added in 21.08
> + rte_vhost_try_drain_queue_thread_unsafe;
> };
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8156796a46..9f541679b9 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -2115,10 +2115,10 @@ write_back_completed_descs_packed(struct
> vhost_virtqueue *vq,
> } while (nr_left > 0);
> }
>
> -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> +static __rte_always_inline uint16_t
> +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint16_t count)
> {
> - struct virtio_net *dev = get_device(vid);
> struct vhost_virtqueue *vq;
> uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> uint16_t start_idx, pkts_idx, vq_size;
> @@ -2126,26 +2126,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> uint16_t from, i;
> int32_t n_poll;
>
> - if (!dev)
> - return 0;
> -
> - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> vq = dev->virtqueue[queue_id];
>
> - if (unlikely(!vq->async_registered)) {
> - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue
> id %d.\n",
> - dev->vid, __func__, queue_id);
> - return 0;
> - }
> -
> - rte_spinlock_lock(&vq->access_lock);
> -
> pkts_idx = vq->async_pkts_idx % vq->size;
> pkts_info = vq->async_pkts_info;
> vq_size = vq->size;
> @@ -2153,7 +2135,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> vq_size, vq->async_pkts_inflight_n);
>
> if (count > vq->async_last_pkts_n) {
> - n_poll = vq->async_ops.check_completed_copies(vid,
> + n_poll = vq->async_ops.check_completed_copies(dev->vid,
> queue_id, 0, count - vq->async_last_pkts_n);
> if (n_poll >= 0) {
> n_pkts_cpl = n_poll;
> @@ -2168,7 +2150,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> if (unlikely(n_pkts_put == 0)) {
> vq->async_last_pkts_n = n_pkts_cpl;
> - goto done;
> + return 0;
> }
>
> if (vq_is_packed(dev)) {
> @@ -2207,12 +2189,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> vq->last_async_desc_idx_split += n_descs;
> }
>
> -done:
> + return n_pkts_put;
> +}
> +
> +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count)
Based on DPDK coding style, things like return value should be in another new line.
https://doc.dpdk.org/guides/contributing/coding_style.html#definitions
For similar changes, please check.
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_pkts_put = 0;
Since this val is for recording pkts completed, maybe n_pkts_cpl?
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue
> id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + rte_spinlock_lock(&vq->access_lock);
> +
> + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
> +
> rte_spinlock_unlock(&vq->access_lock);
>
> return n_pkts_put;
> }
>
> +uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
> + struct rte_mbuf **pkts, uint16_t count, uint16_t times)
> +{
> + struct virtio_net *dev = get_device(vid);
> + struct vhost_virtqueue *vq;
> + uint16_t n_cpl = 0;
Make the name same as above
> +
> + if (!dev)
> + return 0;
> +
> + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + vq = dev->virtqueue[queue_id];
> +
> + if (unlikely(!vq->async_registered)) {
> + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue
> id %d.\n",
> + dev->vid, __func__, queue_id);
> + return 0;
> + }
> +
> + while ((n_cpl < count) && times--)
'while (n_cpl < count && times--)' is enough
Thanks,
Chenbo
> + n_cpl += vhost_poll_enqueue_completed(dev, queue_id, pkts + n_cpl,
> count);
> +
> + return n_cpl;
> +}
> +
> static __rte_always_inline uint32_t
> virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> struct rte_mbuf **pkts, uint32_t count,
> --
> 2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost
2021-07-16 8:56 ` Xia, Chenbo
@ 2021-07-19 3:28 ` Jiang, Cheng1
0 siblings, 0 replies; 70+ messages in thread
From: Jiang, Cheng1 @ 2021-07-19 3:28 UTC (permalink / raw)
To: Xia, Chenbo, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX
Hi Chenbo,
I'll fix these issues in next version.
For the name, I think maybe we can use 'rte_vhost_clear_queue_thread_unsafe'.
Thanks,
Cheng
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Friday, July 16, 2021 4:56 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: RE: [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost
>
> Hi Cheng,
>
> > -----Original Message-----
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Sent: Friday, July 16, 2021 10:59 AM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Subject: [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async
> > vhost
> >
> > Applications need to stop DMA transfers and finish all the in-flight
> > pkts when in VM memory hot-plug case and async vhost is used. This
>
> Pkts -> packets
>
> > patch is to provide an unsafe API to drain in-flight pkts which are
>
> Ditto
>
> > submitted to DMA engine in vhost async data path.
> >
> > Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> > ---
> > lib/vhost/rte_vhost_async.h | 24 ++++++++++
> > lib/vhost/version.map | 3 ++
> > lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
> > 3 files changed, 94 insertions(+), 23 deletions(-)
> >
> > diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> > index bc81cd0caa..fd622631b2 100644
> > --- a/lib/vhost/rte_vhost_async.h
> > +++ b/lib/vhost/rte_vhost_async.h
> > @@ -193,4 +193,28 @@ __rte_experimental uint16_t
> > rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint16_t count);
> >
> > +/**
> > + * This function checks async completion status and empty all pakcets
>
> Pakcets -> packets
>
> > + * for a specific vhost device queue. Packets which are inflight will
> > + * be returned in an array.
> > + *
> > + * @note This function does not perform any locking
> > + *
> > + * @param vid
> > + * id of vhost device to enqueue data
>
> id -> ID
>
> to drain data?
>
> > + * @param queue_id
> > + * queue id to enqueue data
>
> Ditto
>
> > + * @param pkts
> > + * blank array to get return packet pointer
>
> Return -> returned
>
> > + * @param count
> > + * size of the packet array
> > + * @param times
> > + * max number of poll attempts
> > + * @return
> > + * num of packets returned
>
> num -> Number
>
> And please use capital for first character in each line of param description.
>
> > + */
> > +__rte_experimental
> > +uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t
> > +queue_id,
>
> I think 'rte_vhost_drain_queue_thread_unsafe' is better?
>
> > + struct rte_mbuf **pkts, uint16_t count, uint16_t times);
> > +
> > #endif /* _RTE_VHOST_ASYNC_H_ */
> > diff --git a/lib/vhost/version.map b/lib/vhost/version.map index
> > 9103a23cd4..b8fc8770dd 100644
> > --- a/lib/vhost/version.map
> > +++ b/lib/vhost/version.map
> > @@ -79,4 +79,7 @@ EXPERIMENTAL {
> >
> > # added in 21.05
> > rte_vhost_get_negotiated_protocol_features;
> > +
> > + # added in 21.08
> > + rte_vhost_try_drain_queue_thread_unsafe;
> > };
> > diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index
> > 8156796a46..9f541679b9 100644
> > --- a/lib/vhost/virtio_net.c
> > +++ b/lib/vhost/virtio_net.c
> > @@ -2115,10 +2115,10 @@ write_back_completed_descs_packed(struct
> > vhost_virtqueue *vq,
> > } while (nr_left > 0);
> > }
> >
> > -uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > +static __rte_always_inline uint16_t
> > +vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t
> > +queue_id,
> > struct rte_mbuf **pkts, uint16_t count) {
> > - struct virtio_net *dev = get_device(vid);
> > struct vhost_virtqueue *vq;
> > uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> > uint16_t start_idx, pkts_idx, vq_size; @@ -2126,26 +2126,8 @@
> > uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > uint16_t from, i;
> > int32_t n_poll;
> >
> > - if (!dev)
> > - return 0;
> > -
> > - VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > vq = dev->virtqueue[queue_id];
> >
> > - if (unlikely(!vq->async_registered)) {
> > - VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue
> > id %d.\n",
> > - dev->vid, __func__, queue_id);
> > - return 0;
> > - }
> > -
> > - rte_spinlock_lock(&vq->access_lock);
> > -
> > pkts_idx = vq->async_pkts_idx % vq->size;
> > pkts_info = vq->async_pkts_info;
> > vq_size = vq->size;
> > @@ -2153,7 +2135,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int
> > vid, uint16_t queue_id,
> > vq_size, vq->async_pkts_inflight_n);
> >
> > if (count > vq->async_last_pkts_n) {
> > - n_poll = vq->async_ops.check_completed_copies(vid,
> > + n_poll = vq->async_ops.check_completed_copies(dev->vid,
> > queue_id, 0, count - vq->async_last_pkts_n);
> > if (n_poll >= 0) {
> > n_pkts_cpl = n_poll;
> > @@ -2168,7 +2150,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int
> > vid, uint16_t queue_id,
> > n_pkts_put = RTE_MIN(count, n_pkts_cpl);
> > if (unlikely(n_pkts_put == 0)) {
> > vq->async_last_pkts_n = n_pkts_cpl;
> > - goto done;
> > + return 0;
> > }
> >
> > if (vq_is_packed(dev)) {
> > @@ -2207,12 +2189,74 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int
> > vid, uint16_t queue_id,
> > vq->last_async_desc_idx_split += n_descs;
> > }
> >
> > -done:
> > + return n_pkts_put;
> > +}
> > +
> > +uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > + struct rte_mbuf **pkts, uint16_t count)
>
> Based on DPDK coding style, things like return value should be in another
> new line.
>
> https://doc.dpdk.org/guides/contributing/coding_style.html#definitions
>
> For similar changes, please check.
>
> > +{
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_pkts_put = 0;
>
> Since this val is for recording pkts completed, maybe n_pkts_cpl?
>
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue
> > id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + rte_spinlock_lock(&vq->access_lock);
> > +
> > + n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts,
> > +count);
> > +
> > rte_spinlock_unlock(&vq->access_lock);
> >
> > return n_pkts_put;
> > }
> >
> > +uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t
> queue_id,
> > + struct rte_mbuf **pkts, uint16_t count, uint16_t times) {
> > + struct virtio_net *dev = get_device(vid);
> > + struct vhost_virtqueue *vq;
> > + uint16_t n_cpl = 0;
>
> Make the name same as above
>
> > +
> > + if (!dev)
> > + return 0;
> > +
> > + VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> > + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue
> idx %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + vq = dev->virtqueue[queue_id];
> > +
> > + if (unlikely(!vq->async_registered)) {
> > + VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for
> queue
> > id %d.\n",
> > + dev->vid, __func__, queue_id);
> > + return 0;
> > + }
> > +
> > + while ((n_cpl < count) && times--)
>
> 'while (n_cpl < count && times--)' is enough
>
> Thanks,
> Chenbo
>
> > + n_cpl += vhost_poll_enqueue_completed(dev, queue_id,
> pkts + n_cpl,
> > count);
> > +
> > + return n_cpl;
> > +}
> > +
> > static __rte_always_inline uint32_t
> > virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint32_t count,
> > --
> > 2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-19 5:19 ` Xia, Chenbo
2021-07-19 7:56 ` Hu, Jiayu
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-19 5:19 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin, Hu, Jiayu; +Cc: dev, Yang, YvonneX
Hi Cheng & Jiayu,
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Friday, July 16, 2021 3:25 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>
> Subject: [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
>
> From: Jiayu Hu <jiayu.hu@intel.com>
>
> When the guest memory is hotplugged, the vhost application which
> enables DMA acceleration must stop DMA transfers before the vhost
> re-maps the guest memory.
>
> This patch is to notify the vhost application of stopping DMA
> transfers.
>
> Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
> ---
> lib/vhost/vhost_user.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 031c578e54..39e8432d1c 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -1275,6 +1275,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev,
> struct VhostUserMsg *msg,
> vdpa_dev->ops->dev_close(dev->vid);
> dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
> }
> +
> + /* notify the backend application to stop DMA transfers */
Backend application -> vhost application
> + if (dev->async_copy && dev->notify_ops->vring_state_changed) {
> + for (i = 0; i < dev->nr_vring; i++) {
> + dev->notify_ops->vring_state_changed(dev->vid,
> + i, 0);
> + }
> + }
> +
In this case, I think app will never know the vring is enabled again with memory
table updated.
Thanks,
Chenbo
> free_mem_region(dev);
> rte_free(dev->mem);
> dev->mem = NULL;
> --
> 2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
2021-07-19 5:19 ` Xia, Chenbo
@ 2021-07-19 7:56 ` Hu, Jiayu
0 siblings, 0 replies; 70+ messages in thread
From: Hu, Jiayu @ 2021-07-19 7:56 UTC (permalink / raw)
To: Xia, Chenbo, Jiang, Cheng1, maxime.coquelin; +Cc: dev, Yang, YvonneX
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Monday, July 19, 2021 1:19 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Hu, Jiayu <jiayu.hu@intel.com>
> Cc: dev@dpdk.org; Yang, YvonneX <yvonnex.yang@intel.com>
> Subject: RE: [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
>
> Hi Cheng & Jiayu,
>
> > -----Original Message-----
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Sent: Friday, July 16, 2021 3:25 PM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>
> > Subject: [PATCH v5 3/5] vhost: handle memory hotplug for async vhost
> >
> > From: Jiayu Hu <jiayu.hu@intel.com>
> >
> > When the guest memory is hotplugged, the vhost application which
> > enables DMA acceleration must stop DMA transfers before the vhost
> > re-maps the guest memory.
> >
> > This patch is to notify the vhost application of stopping DMA
> > transfers.
> >
> > Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
> > ---
> > lib/vhost/vhost_user.c | 9 +++++++++
> > 1 file changed, 9 insertions(+)
> >
> > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index
> > 031c578e54..39e8432d1c 100644
> > --- a/lib/vhost/vhost_user.c
> > +++ b/lib/vhost/vhost_user.c
> > @@ -1275,6 +1275,15 @@ vhost_user_set_mem_table(struct virtio_net
> > **pdev, struct VhostUserMsg *msg,
> > vdpa_dev->ops->dev_close(dev->vid);
> > dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
> > }
> > +
> > + /* notify the backend application to stop DMA transfers */
>
> Backend application -> vhost application
>
> > + if (dev->async_copy && dev->notify_ops-
> >vring_state_changed) {
> > + for (i = 0; i < dev->nr_vring; i++) {
> > + dev->notify_ops->vring_state_changed(dev-
> >vid,
> > + i, 0);
> > + }
> > + }
> > +
>
> In this case, I think app will never know the vring is enabled again with
> memory table updated.
I will add enable notification in set_mem_table.
Thanks,
Jiayu
>
> Thanks,
> Chenbo
>
> > free_mem_region(dev);
> > rte_free(dev->mem);
> > dev->mem = NULL;
> > --
> > 2.29.2
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (5 preceding siblings ...)
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
` (4 more replies)
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
8 siblings, 5 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain inflight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v6:
* removed unnecessary args for the new API
* improved variable names and function names
* added enable notification in set_mem_table
* fixed vhost example queue clear process
v5:
* added fixes in 'vhost: fix async vhost ops return type'
* improved git log, variable names and logs
v4:
* rebased on the latest code
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (4):
vhost: fix async vhost ops return type
vhost: add unsafe API to clear packets in async vhost
examples/vhost: handle memory hotplug for async vhost
doc: update doc for inflight packets clear API in vhost lib
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 4 +-
examples/vhost/main.c | 55 ++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 30 ++++-
lib/vhost/version.map | 3 +
lib/vhost/vhost_user.c | 16 +++
lib/vhost/virtio_net.c | 152 ++++++++++++++++++++-----
10 files changed, 234 insertions(+), 41 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-21 14:20 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
` (3 subsequent siblings)
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia
Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable
The async vhost callback ops should return negative value when there
are something wrong in the callback, so the return type should be
changed into int32_t. The issue in vhost example is also fixed.
Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 4 +--
lib/vhost/rte_vhost_async.h | 8 ++---
lib/vhost/virtio_net.c | 61 ++++++++++++++++++++++++++++++++-----
4 files changed, 61 insertions(+), 16 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..e964d83837 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed
+ * number of descs processed, negative value means error
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
* @param max_packets
* max number of packets could be completed
* @return
- * number of async descs completed
+ * number of async descs completed, negative value means error
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..16ae4d9e19 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_cpl;
if (!dev)
return 0;
@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_cpl = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-21 14:23 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for " Cheng Jiang
` (2 subsequent siblings)
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the inflight
packets when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to clear inflight packets which
are submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 93 +++++++++++++++++++++++++++----------
3 files changed, 94 insertions(+), 24 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index e964d83837..9961e4970e 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,26 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and clear packets for
+ * a specific vhost device queue. Packets which are inflight will be
+ * returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * ID of vhost device to clear data
+ * @param queue_id
+ * Queue id to clear data
+ * @param pkts
+ * Blank array to get return packet pointer
+ * @param count
+ * Size of the packet array
+ * @return
+ * Number of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..8dcf9e802a 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_clear_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 16ae4d9e19..29f91f9ad4 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2117,10 +2117,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2128,26 +2128,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_cpl;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2155,7 +2137,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(vid,
+ n_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_cpl >= 0) {
n_pkts_cpl = n_cpl;
@@ -2171,7 +2153,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2210,10 +2192,73 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t
+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
- return n_pkts_put;
+ return n_pkts_cpl;
+}
+
+uint16_t
+rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts_cpl;
}
static __rte_always_inline uint32_t
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-21 14:32 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 4/5] examples/vhost: " Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib Cheng Jiang
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/vhost_user.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 031c578e54..8106cc1c30 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1248,6 +1248,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
int numa_node = SOCKET_ID_ANY;
uint64_t mmap_offset;
uint32_t i;
+ bool async_notify = false;
if (validate_msg_fds(msg, memory->nregions) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
@@ -1275,6 +1276,16 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the vhost application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ async_notify = true;
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
@@ -1371,6 +1382,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
dump_guest_pages(dev);
+ if (async_notify) {
+ for (i = 0; i < dev->nr_vring; i++)
+ dev->notify_ops->vring_state_changed(dev->vid, i, 1);
+ }
+
return RTE_VHOST_MSG_RESULT_OK;
free_mem_table:
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 preceding siblings ...)
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib Cheng Jiang
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight packets count.
2. add vring_state_changed() callback.
3. add inflight packets clear process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
examples/vhost/main.c | 55 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index d2179eadb9..cfd2bc157c 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,19 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1505,38 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1545,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 preceding siblings ...)
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-19 8:10 ` Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
4 siblings, 1 reply; 70+ messages in thread
From: Cheng Jiang @ 2021-07-19 8:10 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Update the program guide and release notes for inflight packets clear
API in vhost lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 +++++
doc/guides/rel_notes/release_21_08.rst | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index d18fb98910..3cdfdc0725 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -281,6 +281,11 @@ The following is an overview of some key Vhost API functions:
Poll enqueue completion status from async data path. Completed packets
are returned to applications through ``pkts``.
+* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count)``
+
+ Clear inflight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 6a902ef9ac..482d16ba13 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -117,6 +117,11 @@ New Features
The experimental PMD power management API now supports managing
multiple Ethernet Rx queues per lcore.
+* **Added inflight packets clear API in vhost library.**
+
+ Added an API which can clear the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-21 14:20 ` Maxime Coquelin
0 siblings, 0 replies; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:20 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, stable
On 7/19/21 10:10 AM, Cheng Jiang wrote:
> The async vhost callback ops should return negative value when there
> are something wrong in the callback, so the return type should be
> changed into int32_t. The issue in vhost example is also fixed.
>
> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
> Fixes: 819a71685826 ("vhost: fix async callback return type")
> Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
> Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
> Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
> Cc: stable@dpdk.org
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> examples/vhost/ioat.c | 4 +--
> examples/vhost/ioat.h | 4 +--
> lib/vhost/rte_vhost_async.h | 8 ++---
> lib/vhost/virtio_net.c | 61 ++++++++++++++++++++++++++++++++-----
> 4 files changed, 61 insertions(+), 16 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
@ 2021-07-21 14:23 ` Maxime Coquelin
0 siblings, 0 replies; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:23 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 7/19/21 10:10 AM, Cheng Jiang wrote:
> Applications need to stop DMA transfers and finish all the inflight
> packets when in VM memory hot-plug case and async vhost is used. This
> patch is to provide an unsafe API to clear inflight packets which
> are submitted to DMA engine in vhost async data path.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> lib/vhost/rte_vhost_async.h | 22 +++++++++
> lib/vhost/version.map | 3 ++
> lib/vhost/virtio_net.c | 93 +++++++++++++++++++++++++++----------
> 3 files changed, 94 insertions(+), 24 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-21 14:32 ` Maxime Coquelin
0 siblings, 0 replies; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:32 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 7/19/21 10:10 AM, Cheng Jiang wrote:
> From: Jiayu Hu <jiayu.hu@intel.com>
>
> When the guest memory is hotplugged, the vhost application which
> enables DMA acceleration must stop DMA transfers before the vhost
> re-maps the guest memory.
>
> This patch is to notify the vhost application of stopping DMA
> transfers.
>
> Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
> ---
> lib/vhost/vhost_user.c | 16 ++++++++++++++++
> 1 file changed, 16 insertions(+)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v6 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-21 14:37 ` Maxime Coquelin
0 siblings, 0 replies; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:37 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 7/19/21 10:10 AM, Cheng Jiang wrote:
> When the guest memory is hotplugged, the vhost application which
> enables DMA acceleration must stop DMA transfers before the vhost
> re-maps the guest memory.
>
> To accomplish that, we need to do these changes in the vhost sample:
> 1. add inflight packets count.
> 2. add vring_state_changed() callback.
> 3. add inflight packets clear process in destroy_device() and
> vring_state_changed().
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> examples/vhost/main.c | 55 +++++++++++++++++++++++++++++++++++++++++--
> examples/vhost/main.h | 1 +
> 2 files changed, 54 insertions(+), 2 deletions(-)
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib Cheng Jiang
@ 2021-07-21 14:37 ` Maxime Coquelin
0 siblings, 0 replies; 70+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:37 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
On 7/19/21 10:10 AM, Cheng Jiang wrote:
> Update the program guide and release notes for inflight packets clear
> API in vhost lib.
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> doc/guides/prog_guide/vhost_lib.rst | 5 +++++
> doc/guides/rel_notes/release_21_08.rst | 5 +++++
> 2 files changed, 10 insertions(+)
>
> diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
> index d18fb98910..3cdfdc0725 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -281,6 +281,11 @@ The following is an overview of some key Vhost API functions:
> Poll enqueue completion status from async data path. Completed packets
> are returned to applications through ``pkts``.
>
> +* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count)``
> +
> + Clear inflight packets which are submitted to DMA engine in vhost async data
> + path. Completed packets are returned to applications through ``pkts``.
> +
> Vhost-user Implementations
> --------------------------
>
> diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
> index 6a902ef9ac..482d16ba13 100644
> --- a/doc/guides/rel_notes/release_21_08.rst
> +++ b/doc/guides/rel_notes/release_21_08.rst
> @@ -117,6 +117,11 @@ New Features
> The experimental PMD power management API now supports managing
> multiple Ethernet Rx queues per lcore.
>
> +* **Added inflight packets clear API in vhost library.**
> +
> + Added an API which can clear the inflight packets submitted to DMA
> + engine in vhost async data path.
> +
>
> Removed Items
> -------------
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (6 preceding siblings ...)
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type Cheng Jiang
` (5 more replies)
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
8 siblings, 6 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain inflight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v7:
* rebased on the latest codes
* improved commit log
v6:
* removed unnecessary args for the new API
* improved variable names and function names
* added enable notification in set_mem_table
* fixed vhost example queue clear process
v5:
* added fixes in 'vhost: fix async vhost ops return type'
* improved git log, variable names and logs
v4:
* rebased on the latest codes
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (4):
vhost: fix async vhost ops return type
vhost: add unsafe API to clear packets in async vhost
examples/vhost: handle memory hotplug for async vhost
doc: update doc for queue clear API in vhost lib
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 4 +-
examples/vhost/main.c | 55 ++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 30 ++++-
lib/vhost/version.map | 1 +
lib/vhost/vhost_user.c | 16 +++
lib/vhost/virtio_net.c | 152 ++++++++++++++++++++-----
10 files changed, 232 insertions(+), 41 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
` (4 subsequent siblings)
5 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia
Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable
The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed into int32_t. The issue in the vhost example is also fixed.
Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 4 +--
lib/vhost/rte_vhost_async.h | 8 ++---
lib/vhost/virtio_net.c | 61 ++++++++++++++++++++++++++++++++-----
4 files changed, 61 insertions(+), 16 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 69ec66bba5..02d012ae23 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed
+ * number of descs processed, negative value means error
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
* @param max_packets
* max number of packets could be completed
* @return
- * number of async descs completed
+ * number of async descs completed, negative value means error
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6e5d82c1a8..3ab5229f76 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_cpl;
if (!dev)
return 0;
@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_cpl = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 2/5] vhost: add unsafe API to clear packets in async vhost
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 3/5] vhost: handle memory hotplug for " Cheng Jiang
` (3 subsequent siblings)
5 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the inflight
packets in the VM memory hot-plug case when async vhost is used. This
patch is to provide an unsafe API to clear inflight packets which
are submitted to the DMA engine in the vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/rte_vhost_async.h | 22 +++++++++
lib/vhost/version.map | 1 +
lib/vhost/virtio_net.c | 93 +++++++++++++++++++++++++++----------
3 files changed, 92 insertions(+), 24 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 02d012ae23..b25ff446f7 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -246,4 +246,26 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
__rte_experimental
int rte_vhost_async_get_inflight(int vid, uint16_t queue_id);
+/**
+ * This function checks async completion status and clear packets for
+ * a specific vhost device queue. Packets which are inflight will be
+ * returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * ID of vhost device to clear data
+ * @param queue_id
+ * Queue id to clear data
+ * @param pkts
+ * Blank array to get return packet pointer
+ * @param count
+ * Size of the packet array
+ * @return
+ * Number of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index e0c89646e8..e2504ba657 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -84,4 +84,5 @@ EXPERIMENTAL {
rte_vhost_async_get_inflight;
rte_vhost_async_channel_register_thread_unsafe;
rte_vhost_async_channel_unregister_thread_unsafe;
+ rte_vhost_clear_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 3ab5229f76..8549afbbe1 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2214,10 +2214,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2225,26 +2225,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_cpl;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2252,7 +2234,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(vid,
+ n_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_cpl >= 0) {
n_pkts_cpl = n_cpl;
@@ -2268,7 +2250,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2310,10 +2292,73 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
}
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t
+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
- return n_pkts_put;
+ return n_pkts_cpl;
+}
+
+uint16_t
+rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts_cpl;
}
static __rte_always_inline uint32_t
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 3/5] vhost: handle memory hotplug for async vhost
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 4/5] examples/vhost: " Cheng Jiang
` (2 subsequent siblings)
5 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/vhost_user.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 31300e194f..433f412fa8 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1248,6 +1248,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
int numa_node = SOCKET_ID_ANY;
uint64_t mmap_offset;
uint32_t i;
+ bool async_notify = false;
if (validate_msg_fds(msg, memory->nregions) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
@@ -1275,6 +1276,16 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the vhost application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ async_notify = true;
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
@@ -1371,6 +1382,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
dump_guest_pages(dev);
+ if (async_notify) {
+ for (i = 0; i < dev->nr_vring; i++)
+ dev->notify_ops->vring_state_changed(dev->vid, i, 1);
+ }
+
return RTE_VHOST_MSG_RESULT_OK;
free_mem_table:
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 4/5] examples/vhost: handle memory hotplug for async vhost
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (2 preceding siblings ...)
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 3/5] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 5/5] doc: update doc for queue clear API in vhost lib Cheng Jiang
2021-07-22 5:07 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Xia, Chenbo
5 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to do these changes in the vhost sample:
1. add inflight packets count.
2. add vring_state_changed() callback.
3. add inflight packets clear process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
examples/vhost/main.c | 55 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 9cd855a696..bc3d71c898 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,19 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1505,38 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1545,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v7 5/5] doc: update doc for queue clear API in vhost lib
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (3 preceding siblings ...)
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 4/5] examples/vhost: " Cheng Jiang
@ 2021-07-22 4:09 ` Cheng Jiang
2021-07-22 5:07 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Xia, Chenbo
5 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-22 4:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Update the program guide and release notes for virtqueue inflight
packets clear API in vhost lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 +++++
doc/guides/rel_notes/release_21_08.rst | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 70ce4974df..8874033165 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -305,6 +305,11 @@ The following is an overview of some key Vhost API functions:
This function returns the amount of in-flight packets for the vhost
queue using async acceleration.
+* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count)``
+
+ Clear inflight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 543e93ff1d..d9c4cc5df0 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -155,6 +155,11 @@ New Features
The experimental PMD power management API now supports managing
multiple Ethernet Rx queues per lcore.
+* **Added inflight packets clear API in vhost library.**
+
+ Added an API which can clear the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
` (4 preceding siblings ...)
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 5/5] doc: update doc for queue clear API in vhost lib Cheng Jiang
@ 2021-07-22 5:07 ` Xia, Chenbo
2021-07-22 16:12 ` Thomas Monjalon
5 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-22 5:07 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Thursday, July 22, 2021 12:09 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
>
> When the guest memory is hotplugged, the vhost application which
> enables DMA acceleration must stop DMA transfers before the vhost
> re-maps the guest memory.
>
> This patch set is to provide an unsafe API to drain inflight pkts
> which are submitted to DMA engine in vhost async data path, and
> notify the vhost application of stopping DMA transfers. And enable it
> in vhost example.
>
> v7:
> * rebased on the latest codes
> * improved commit log
> v6:
> * removed unnecessary args for the new API
> * improved variable names and function names
> * added enable notification in set_mem_table
> * fixed vhost example queue clear process
> v5:
> * added fixes in 'vhost: fix async vhost ops return type'
> * improved git log, variable names and logs
> v4:
> * rebased on the latest codes
> v3:
> * added a patch to fix async ops return type
> * fixed async ops fail handler
> * updated the doc
> v2:
> * changed the patch structure
>
> Cheng Jiang (4):
> vhost: fix async vhost ops return type
> vhost: add unsafe API to clear packets in async vhost
> examples/vhost: handle memory hotplug for async vhost
> doc: update doc for queue clear API in vhost lib
>
> Jiayu Hu (1):
> vhost: handle memory hotplug for async vhost
>
> doc/guides/prog_guide/vhost_lib.rst | 5 +
> doc/guides/rel_notes/release_21_08.rst | 5 +
> examples/vhost/ioat.c | 4 +-
> examples/vhost/ioat.h | 4 +-
> examples/vhost/main.c | 55 ++++++++-
> examples/vhost/main.h | 1 +
> lib/vhost/rte_vhost_async.h | 30 ++++-
> lib/vhost/version.map | 1 +
> lib/vhost/vhost_user.c | 16 +++
> lib/vhost/virtio_net.c | 152 ++++++++++++++++++++-----
> 10 files changed, 232 insertions(+), 41 deletions(-)
>
> --
> 2.29.2
Series applied to next-virtio/main. Thanks
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-22 5:07 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Xia, Chenbo
@ 2021-07-22 16:12 ` Thomas Monjalon
2021-07-23 5:06 ` Xia, Chenbo
0 siblings, 1 reply; 70+ messages in thread
From: Thomas Monjalon @ 2021-07-22 16:12 UTC (permalink / raw)
To: Jiang, Cheng1, Xia, Chenbo
Cc: maxime.coquelin, dev, Hu, Jiayu, Yang, YvonneX, david.marchand
22/07/2021 07:07, Xia, Chenbo:
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > When the guest memory is hotplugged, the vhost application which
> > enables DMA acceleration must stop DMA transfers before the vhost
> > re-maps the guest memory.
> >
> > This patch set is to provide an unsafe API to drain inflight pkts
> > which are submitted to DMA engine in vhost async data path, and
> > notify the vhost application of stopping DMA transfers. And enable it
> > in vhost example.
>
> Series applied to next-virtio/main. Thanks
I cannot pull this series in main branch.
There is a compilation error seen on Arm cross-compilation:
examples/vhost/main.c:1493:51: error: assignment to 'int32_t (*)(int, uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka 'int (*)(int, short unsigned int, struct rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned int)'} from incompatible pointer type 'uint32_t (*)(int, uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka 'unsigned int (*)(int, short unsigned int, struct rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned int)'} [-Werror=incompatible-pointer-types]
1493 | channel_ops.transfer_data = ioat_transfer_data_cb;
| ^
Other comments about the last patch:
- it is updating doc out of the original patch doing the code changes
- there is not even a reference to the code patch (Fixes: line)
- the addition in the release notes is not sorted
Last question while at it, why having the API documentation
in the vhost guide (rst file)?
Doxygen is not enough to describe the functions?
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-22 16:12 ` Thomas Monjalon
@ 2021-07-23 5:06 ` Xia, Chenbo
2021-07-23 7:25 ` Thomas Monjalon
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-23 5:06 UTC (permalink / raw)
To: Thomas Monjalon, Jiang, Cheng1, maxime.coquelin
Cc: dev, Hu, Jiayu, Yang, YvonneX, david.marchand
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Friday, July 23, 2021 12:13 AM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: maxime.coquelin@redhat.com; dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>;
> Yang, YvonneX <yvonnex.yang@intel.com>; david.marchand@redhat.com
> Subject: Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async
> vhost
>
> 22/07/2021 07:07, Xia, Chenbo:
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > > When the guest memory is hotplugged, the vhost application which
> > > enables DMA acceleration must stop DMA transfers before the vhost
> > > re-maps the guest memory.
> > >
> > > This patch set is to provide an unsafe API to drain inflight pkts
> > > which are submitted to DMA engine in vhost async data path, and
> > > notify the vhost application of stopping DMA transfers. And enable it
> > > in vhost example.
> >
> > Series applied to next-virtio/main. Thanks
>
> I cannot pull this series in main branch.
>
> There is a compilation error seen on Arm cross-compilation:
>
> examples/vhost/main.c:1493:51: error: assignment to 'int32_t (*)(int,
> uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *,
> uint16_t)' {aka 'int (*)(int, short unsigned int, struct
> rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned int)'}
> from incompatible pointer type 'uint32_t (*)(int, uint16_t, struct
> rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka
> 'unsigned int (*)(int, short unsigned int, struct rte_vhost_async_desc *,
> struct rte_vhost_async_status *, short unsigned int)'} [-Werror=incompatible-
> pointer-types]
> 1493 | channel_ops.transfer_data =
> ioat_transfer_data_cb;
> | ^
I see. @Cheng, please fix it in new version.
>
> Other comments about the last patch:
> - it is updating doc out of the original patch doing the code changes
> - there is not even a reference to the code patch (Fixes: line)
I think the doc patch could be combined with the code patch in the same series.
But personally, sometimes I am not very clear when doc patch should be split.
For example, in this case we can combine as the update in release note is related
> only to the code patch. What if it's related to multiple patches? Should we split or
> add doc changes to every related patch? Just a bit confused. Maybe you can give
me some general guidance so that we will be on the same page.
> - the addition in the release notes is not sorted
Not very clear on this. The change is put at the bottom. Are there any sorting
rules?
>
> Last question while at it, why having the API documentation
> in the vhost guide (rst file)?
> Doxygen is not enough to describe the functions?
Good point. To be honest, I have not thought about it :P
I think it could be moved to the doxygen later (maybe in another patch). The only
concern of mine is some API description in the vhost guide is a bit long.
@Maxime What do you think?
Thanks,
Chenbo
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-23 5:06 ` Xia, Chenbo
@ 2021-07-23 7:25 ` Thomas Monjalon
2021-07-23 7:34 ` Xia, Chenbo
0 siblings, 1 reply; 70+ messages in thread
From: Thomas Monjalon @ 2021-07-23 7:25 UTC (permalink / raw)
To: maxime.coquelin, Xia, Chenbo
Cc: Jiang, Cheng1, dev, Hu, Jiayu, Yang, YvonneX, david.marchand,
ferruh.yigit
23/07/2021 07:06, Xia, Chenbo:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 22/07/2021 07:07, Xia, Chenbo:
> > > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > > > When the guest memory is hotplugged, the vhost application which
> > > > enables DMA acceleration must stop DMA transfers before the vhost
> > > > re-maps the guest memory.
> > > >
> > > > This patch set is to provide an unsafe API to drain inflight pkts
> > > > which are submitted to DMA engine in vhost async data path, and
> > > > notify the vhost application of stopping DMA transfers. And enable it
> > > > in vhost example.
> > >
> > > Series applied to next-virtio/main. Thanks
> >
> > I cannot pull this series in main branch.
> >
> > There is a compilation error seen on Arm cross-compilation:
> >
> > examples/vhost/main.c:1493:51: error: assignment to 'int32_t (*)(int,
> > uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *,
> > uint16_t)' {aka 'int (*)(int, short unsigned int, struct
> > rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned int)'}
> > from incompatible pointer type 'uint32_t (*)(int, uint16_t, struct
> > rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka
> > 'unsigned int (*)(int, short unsigned int, struct rte_vhost_async_desc *,
> > struct rte_vhost_async_status *, short unsigned int)'} [-Werror=incompatible-
> > pointer-types]
> > 1493 | channel_ops.transfer_data =
> > ioat_transfer_data_cb;
> > | ^
>
> I see. @Cheng, please fix it in new version.
>
> >
> > Other comments about the last patch:
> > - it is updating doc out of the original patch doing the code changes
> > - there is not even a reference to the code patch (Fixes: line)
>
> I think the doc patch could be combined with the code patch in the same series.
> But personally, sometimes I am not very clear when doc patch should be split.
> For example, in this case we can combine as the update in release note is related
> only to the code patch. What if it's related to multiple patch? Should we split or
> add doc changes to every related patches? Just a bit confused. Maybe you can give
> me some general guidance so that we will be on the same page.
The doc must be updated in each patch.
Sometimes, the same line is updated to add a word related to the patch.
> > - the addition in the release notes is not sorted
>
> Not very clear on this. The change is put in the bottom. Is there any sorting
> rules?
Read the comment at the beginning of the section, it explains
how things must be sorted:
Suggested order in release notes items:
* Core libs (EAL, mempool, ring, mbuf, buses)
* Device abstraction libs and PMDs (ordered alphabetically by vendor name)
- ethdev (lib, PMDs)
- cryptodev (lib, PMDs)
- eventdev (lib, PMDs)
- etc
* Other libs
* Apps, Examples, Tools (if significant)
vhost is usually at the end of ethdev PMDs.
> > Last question while at it, why having the API documentation
> > in the vhost guide (rst file)?
> > Doxygen is not enough to describe the functions?
>
> Good point. To be honest, I have not thought about it :P
>
> I think it could be moved to the doxygen later (maybe in another patch). The only
> concern of mine is some API description in the vhost guide is a bit long.
So you can improve doxygen and remove this part of the guide.
The guide should be an overview, a tutorial and an internal design reference.
> @Maxime What do you think?
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-23 7:25 ` Thomas Monjalon
@ 2021-07-23 7:34 ` Xia, Chenbo
2021-07-23 7:39 ` Thomas Monjalon
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-23 7:34 UTC (permalink / raw)
To: Thomas Monjalon, maxime.coquelin
Cc: Jiang, Cheng1, dev, Hu, Jiayu, Yang, YvonneX, david.marchand,
Yigit, Ferruh
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Friday, July 23, 2021 3:25 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: Jiang, Cheng1 <cheng1.jiang@intel.com>; dev@dpdk.org; Hu, Jiayu
> <jiayu.hu@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>;
> david.marchand@redhat.com; Yigit, Ferruh <ferruh.yigit@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async
> vhost
>
> 23/07/2021 07:06, Xia, Chenbo:
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 22/07/2021 07:07, Xia, Chenbo:
> > > > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > > > > When the guest memory is hotplugged, the vhost application which
> > > > > enables DMA acceleration must stop DMA transfers before the vhost
> > > > > re-maps the guest memory.
> > > > >
> > > > > This patch set is to provide an unsafe API to drain inflight pkts
> > > > > which are submitted to DMA engine in vhost async data path, and
> > > > > notify the vhost application of stopping DMA transfers. And enable it
> > > > > in vhost example.
> > > >
> > > > Series applied to next-virtio/main. Thanks
> > >
> > > I cannot pull this series in main branch.
> > >
> > > There is a compilation error seen on Arm cross-compilation:
> > >
> > > examples/vhost/main.c:1493:51: error: assignment to 'int32_t (*)(int,
> > > uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *,
> > > uint16_t)' {aka 'int (*)(int, short unsigned int, struct
> > > rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned
> int)'}
> > > from incompatible pointer type 'uint32_t (*)(int, uint16_t, struct
> > > rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka
> > > 'unsigned int (*)(int, short unsigned int, struct rte_vhost_async_desc *,
> > > struct rte_vhost_async_status *, short unsigned int)'} [-
> Werror=incompatible-
> > > pointer-types]
> > > 1493 | channel_ops.transfer_data =
> > > ioat_transfer_data_cb;
> > > | ^
> >
> > I see. @Cheng, please fix it in new version.
> >
> > >
> > > Other comments about the last patch:
> > > - it is updating doc out of the original patch doing the code changes
> > > - there is not even a reference to the code patch (Fixes: line)
> >
> > I think the doc patch could be combined with the code patch in the same
> series.
> > But personally, sometimes I am not very clear when doc patch should be split.
> > For example, in this case we can combine as the update in release note is
> related
> > only to the code patch. What if it's related to multiple patch? Should we
> split or
> > add doc changes to every related patches? Just a bit confused. Maybe you can
> give
> > me some general guidance so that we will be on the same page.
>
> The doc must be updated in each patch.
> Sometimes, the same line is updated to add a word related to the patch.
Thanks for the guidance!
>
> > > - the addition in the release notes is not sorted
> >
> > Not very clear on this. The change is put in the bottom. Is there any
> sorting
> > rules?
>
> Read the comment at the beginning of the section, it explains
> how things must be sorted:
>
> Suggested order in release notes items:
> * Core libs (EAL, mempool, ring, mbuf, buses)
> * Device abstraction libs and PMDs (ordered alphabetically by vendor name)
> - ethdev (lib, PMDs)
> - cryptodev (lib, PMDs)
> - eventdev (lib, PMDs)
> - etc
> * Other libs
> * Apps, Examples, Tools (if significant)
>
> vhost is usually at the end of ethdev PMDs.
Oops... I should have noticed it.
>
> > > Last question while at it, why having the API documentation
> > > in the vhost guide (rst file)?
> > > Doxygen is not enough to describe the functions?
> >
> > Good point. To be honest, I have not thought about it :P
> >
> > I think it could be moved to the doxygen later (maybe in another patch). The
> only
> > concern of mine is some API description in the vhost guide is a bit long.
>
> So you can improve doxygen and remove this part of the guide.
> The guide should be an overview, a tutorial and an internal design reference.
Makes sense to me. For this patch, I suggest keeping the API doc in the vhost guide.
Then I will send a patch to move them all if we all agree on this.
Thanks,
Chenbo
>
> > @Maxime What do you think?
>
>
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-23 7:34 ` Xia, Chenbo
@ 2021-07-23 7:39 ` Thomas Monjalon
2021-07-23 8:03 ` Xia, Chenbo
0 siblings, 1 reply; 70+ messages in thread
From: Thomas Monjalon @ 2021-07-23 7:39 UTC (permalink / raw)
To: maxime.coquelin, Xia, Chenbo
Cc: Jiang, Cheng1, dev, Hu, Jiayu, Yang, YvonneX, david.marchand,
Yigit, Ferruh
23/07/2021 09:34, Xia, Chenbo:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 23/07/2021 07:06, Xia, Chenbo:
> > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > 22/07/2021 07:07, Xia, Chenbo:
> > > > > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > > > > > When the guest memory is hotplugged, the vhost application which
> > > > > > enables DMA acceleration must stop DMA transfers before the vhost
> > > > > > re-maps the guest memory.
> > > > > >
> > > > > > This patch set is to provide an unsafe API to drain inflight pkts
> > > > > > which are submitted to DMA engine in vhost async data path, and
> > > > > > notify the vhost application of stopping DMA transfers. And enable it
> > > > > > in vhost example.
> > > > >
> > > > > Series applied to next-virtio/main. Thanks
> > > >
> > > > I cannot pull this series in main branch.
> > > >
> > > > There is a compilation error seen on Arm cross-compilation:
> > > >
> > > > examples/vhost/main.c:1493:51: error: assignment to 'int32_t (*)(int,
> > > > uint16_t, struct rte_vhost_async_desc *, struct rte_vhost_async_status *,
> > > > uint16_t)' {aka 'int (*)(int, short unsigned int, struct
> > > > rte_vhost_async_desc *, struct rte_vhost_async_status *, short unsigned
> > int)'}
> > > > from incompatible pointer type 'uint32_t (*)(int, uint16_t, struct
> > > > rte_vhost_async_desc *, struct rte_vhost_async_status *, uint16_t)' {aka
> > > > 'unsigned int (*)(int, short unsigned int, struct rte_vhost_async_desc *,
> > > > struct rte_vhost_async_status *, short unsigned int)'} [-
> > Werror=incompatible-
> > > > pointer-types]
> > > > 1493 | channel_ops.transfer_data =
> > > > ioat_transfer_data_cb;
> > > > | ^
> > >
> > > I see. @Cheng, please fix it in new version.
> > >
> > > >
> > > > Other comments about the last patch:
> > > > - it is updating doc out of the original patch doing the code changes
> > > > - there is not even a reference to the code patch (Fixes: line)
> > >
> > > I think the doc patch could be combined with the code patch in the same
> > series.
> > > But personally, sometimes I am not very clear when doc patch should be split.
> > > For example, in this case we can combine as the update in release note is
> > related
> > > only to the code patch. What if it's related to multiple patch? Should we
> > split or
> > > add doc changes to every related patches? Just a bit confused. Maybe you can
> > give
> > > me some general guidance so that we will be on the same page.
> >
> > The doc must be updated in each patch.
> > Sometimes, the same line is updated to add a word related to the patch.
>
> Thanks for the guidance!
>
> >
> > > > - the addition in the release notes is not sorted
> > >
> > > Not very clear on this. The change is put in the bottom. Is there any
> > sorting
> > > rules?
> >
> > Read the comment at the beginning of the section, it explains
> > how things must be sorted:
> >
> > Suggested order in release notes items:
> > * Core libs (EAL, mempool, ring, mbuf, buses)
> > * Device abstraction libs and PMDs (ordered alphabetically by vendor name)
> > - ethdev (lib, PMDs)
> > - cryptodev (lib, PMDs)
> > - eventdev (lib, PMDs)
> > - etc
> > * Other libs
> > * Apps, Examples, Tools (if significant)
> >
> > vhost is usually at the end of ethdev PMDs.
>
> Oops.. I should notice it..
>
> >
> > > > Last question while at it, why having the API documentation
> > > > in the vhost guide (rst file)?
> > > > Doxygen is not enough to describe the functions?
> > >
> > > Good point. To be honest, I have not thought about it :P
> > >
> > > I think it could be moved to the doxygen later (maybe in another patch). The
> > only
> > > concern of mine is some API description in the vhost guide is a bit long.
> >
> > So you can improve doxygen and remove this part of the guide.
> > The guide should be an overview, a tutorial and an internal design reference.
>
> Make sense to me. For this patch, I suggest to keep the api doc in vhost guide.
Yes of course, don't change everything for this patch :)
> Then I will send a patch to move them all if we all agree on this.
Thank you.
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-23 7:39 ` Thomas Monjalon
@ 2021-07-23 8:03 ` Xia, Chenbo
2021-07-23 8:57 ` Thomas Monjalon
0 siblings, 1 reply; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-23 8:03 UTC (permalink / raw)
To: Thomas Monjalon, maxime.coquelin
Cc: Jiang, Cheng1, dev, Hu, Jiayu, Yang, YvonneX, david.marchand,
Yigit, Ferruh
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Friday, July 23, 2021 3:40 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: Jiang, Cheng1 <cheng1.jiang@intel.com>; dev@dpdk.org; Hu, Jiayu
> <jiayu.hu@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>;
> david.marchand@redhat.com; Yigit, Ferruh <ferruh.yigit@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async
> vhost
>
> 23/07/2021 09:34, Xia, Chenbo:
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 23/07/2021 07:06, Xia, Chenbo:
> > > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > > 22/07/2021 07:07, Xia, Chenbo:
> > > > > > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > > > > > > When the guest memory is hotplugged, the vhost application which
> > > > > > > enables DMA acceleration must stop DMA transfers before the vhost
> > > > > > > re-maps the guest memory.
> > > > > > >
[...]
> > > Read the comment at the beginning of the section, it explains
> > > how things must be sorted:
> > >
> > > Suggested order in release notes items:
> > > * Core libs (EAL, mempool, ring, mbuf, buses)
> > > * Device abstraction libs and PMDs (ordered alphabetically by vendor
> name)
> > > - ethdev (lib, PMDs)
> > > - cryptodev (lib, PMDs)
> > > - eventdev (lib, PMDs)
> > > - etc
> > > * Other libs
> > > * Apps, Examples, Tools (if significant)
> > >
> > > vhost is usually at the end of ethdev PMDs.
> >
> > Oops.. I should notice it..
I want to make this clear: the release note is a vhost lib update, which belongs
to '* Other libs'. Since there are no app/example/tool changes, it just happens to be
the last item.
Thanks,
Chenbo
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v8 0/4] vhost: handle memory hotplug for async vhost
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
` (7 preceding siblings ...)
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
@ 2021-07-23 8:09 ` Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type Cheng Jiang
` (4 more replies)
8 siblings, 5 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-23 8:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch set is to provide an unsafe API to drain inflight pkts
which are submitted to DMA engine in vhost async data path, and
notify the vhost application of stopping DMA transfers. And enable it
in vhost example.
v8:
* updated doc in the code patch
* fix a compile error in cross-compilation
v7:
* rebased on the latest codes
* improved commit log
v6:
* removed unnecessary args for the new API
* improved variable names and function names
* added enable notification in set_mem_table
* fixed vhost example queue clear process
v5:
* added fixes in 'vhost: fix async vhost ops return type'
* improved git log, variable names and logs
v4:
* rebased on the latest codes
v3:
* added a patch to fix async ops return type
* fixed async ops fail handler
* updated the doc
v2:
* changed the patch structure
Cheng Jiang (3):
vhost: fix async vhost ops return type
vhost: add unsafe API to clear packets in async vhost
examples/vhost: handle memory hotplug for async vhost
Jiayu Hu (1):
vhost: handle memory hotplug for async vhost
doc/guides/prog_guide/vhost_lib.rst | 5 +
doc/guides/rel_notes/release_21_08.rst | 5 +
examples/vhost/ioat.c | 4 +-
examples/vhost/ioat.h | 8 +-
examples/vhost/main.c | 55 ++++++++-
examples/vhost/main.h | 1 +
lib/vhost/rte_vhost_async.h | 30 ++++-
lib/vhost/version.map | 1 +
lib/vhost/vhost_user.c | 16 +++
lib/vhost/virtio_net.c | 152 ++++++++++++++++++++-----
10 files changed, 234 insertions(+), 43 deletions(-)
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
@ 2021-07-23 8:09 ` Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
` (3 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-23 8:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia
Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable
The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.
Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 6e9a9d2a02ae ("examples/vhost: fix ioat dependency")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
examples/vhost/ioat.c | 4 +--
examples/vhost/ioat.h | 8 ++---
lib/vhost/rte_vhost_async.h | 8 ++---
lib/vhost/virtio_net.c | 61 ++++++++++++++++++++++++++++++++-----
4 files changed, 63 insertions(+), 18 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
return ret;
}
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
return i_desc;
}
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..62e163c585 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
#ifdef RTE_RAW_IOAT
int open_ioat(const char *value);
-uint32_t
+int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data, uint16_t count);
-uint32_t
+int32_t
ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
@@ -42,7 +42,7 @@ static int open_ioat(const char *value __rte_unused)
return -1;
}
-static uint32_t
+static int32_t
ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
struct rte_vhost_async_desc *descs __rte_unused,
struct rte_vhost_async_status *opaque_data __rte_unused,
@@ -51,7 +51,7 @@ ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
return -1;
}
-static uint32_t
+static int32_t
ioat_check_completed_copies_cb(int vid __rte_unused,
uint16_t queue_id __rte_unused,
struct rte_vhost_async_status *opaque_data __rte_unused,
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 69ec66bba5..02d012ae23 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed
+ * number of descs processed, negative value means error
*/
- uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+ int32_t (*transfer_data)(int vid, uint16_t queue_id,
struct rte_vhost_async_desc *descs,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
* @param max_packets
* max number of packets could be completed
* @return
- * number of async descs completed
+ * number of async descs completed, negative value means error
*/
- uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+ int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
struct rte_vhost_async_status *opaque_data,
uint16_t max_packets);
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6e5d82c1a8..3ab5229f76 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
+ int32_t n_cpl;
if (!dev)
return 0;
@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_cpl = vq->async_ops.check_completed_copies(vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in async vhost
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type Cheng Jiang
@ 2021-07-23 8:09 ` Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 3/4] vhost: handle memory hotplug for " Cheng Jiang
` (2 subsequent siblings)
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-23 8:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
Applications need to stop DMA transfers and finish all the inflight
packets in the VM memory hot-plug case when async vhost is used. This
patch is to provide an unsafe API to clear inflight packets which
are submitted to DMA engine in vhost async data path. Update the
program guide and release notes for virtqueue inflight packets clear
API in vhost lib.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
doc/guides/prog_guide/vhost_lib.rst | 5 ++
doc/guides/rel_notes/release_21_08.rst | 5 ++
lib/vhost/rte_vhost_async.h | 22 ++++++
lib/vhost/version.map | 1 +
lib/vhost/virtio_net.c | 93 +++++++++++++++++++-------
5 files changed, 102 insertions(+), 24 deletions(-)
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 70ce4974df..8874033165 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -305,6 +305,11 @@ The following is an overview of some key Vhost API functions:
This function returns the amount of in-flight packets for the vhost
queue using async acceleration.
+* ``rte_vhost_clear_queue_thread_unsafe(vid, queue_id, **pkts, count)``
+
+ Clear inflight packets which are submitted to DMA engine in vhost async data
+ path. Completed packets are returned to applications through ``pkts``.
+
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 543e93ff1d..d9c4cc5df0 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -155,6 +155,11 @@ New Features
The experimental PMD power management API now supports managing
multiple Ethernet Rx queues per lcore.
+* **Added inflight packets clear API in vhost library.**
+
+ Added an API which can clear the inflight packets submitted to DMA
+ engine in vhost async data path.
+
Removed Items
-------------
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 02d012ae23..b25ff446f7 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -246,4 +246,26 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
__rte_experimental
int rte_vhost_async_get_inflight(int vid, uint16_t queue_id);
+/**
+ * This function checks async completion status and clear packets for
+ * a specific vhost device queue. Packets which are inflight will be
+ * returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * ID of vhost device to clear data
+ * @param queue_id
+ * Queue id to clear data
+ * @param pkts
+ * Blank array to get return packet pointer
+ * @param count
+ * Size of the packet array
+ * @return
+ * Number of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index e0c89646e8..e2504ba657 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -84,4 +84,5 @@ EXPERIMENTAL {
rte_vhost_async_get_inflight;
rte_vhost_async_channel_register_thread_unsafe;
rte_vhost_async_channel_unregister_thread_unsafe;
+ rte_vhost_clear_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 3ab5229f76..8549afbbe1 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2214,10 +2214,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2225,26 +2225,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_cpl;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2252,7 +2234,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_cpl = vq->async_ops.check_completed_copies(vid,
+ n_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_cpl >= 0) {
n_pkts_cpl = n_cpl;
@@ -2268,7 +2250,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2310,10 +2292,73 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
}
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t
+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
- return n_pkts_put;
+ return n_pkts_cpl;
+}
+
+uint16_t
+rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts_cpl;
}
static __rte_always_inline uint32_t
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v8 3/4] vhost: handle memory hotplug for async vhost
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
@ 2021-07-23 8:09 ` Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 4/4] examples/vhost: " Cheng Jiang
2021-07-23 9:08 ` [dpdk-dev] [PATCH v8 0/4] vhost: " Xia, Chenbo
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-23 8:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang
From: Jiayu Hu <jiayu.hu@intel.com>
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
This patch is to notify the vhost application of stopping DMA
transfers.
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/vhost_user.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 31300e194f..433f412fa8 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -1248,6 +1248,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
int numa_node = SOCKET_ID_ANY;
uint64_t mmap_offset;
uint32_t i;
+ bool async_notify = false;
if (validate_msg_fds(msg, memory->nregions) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
@@ -1275,6 +1276,16 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
}
+
+ /* notify the vhost application to stop DMA transfers */
+ if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ dev->notify_ops->vring_state_changed(dev->vid,
+ i, 0);
+ }
+ async_notify = true;
+ }
+
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
@@ -1371,6 +1382,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
dump_guest_pages(dev);
+ if (async_notify) {
+ for (i = 0; i < dev->nr_vring; i++)
+ dev->notify_ops->vring_state_changed(dev->vid, i, 1);
+ }
+
return RTE_VHOST_MSG_RESULT_OK;
free_mem_table:
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* [dpdk-dev] [PATCH v8 4/4] examples/vhost: handle memory hotplug for async vhost
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
` (2 preceding siblings ...)
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 3/4] vhost: handle memory hotplug for " Cheng Jiang
@ 2021-07-23 8:09 ` Cheng Jiang
2021-07-23 9:08 ` [dpdk-dev] [PATCH v8 0/4] vhost: " Xia, Chenbo
4 siblings, 0 replies; 70+ messages in thread
From: Cheng Jiang @ 2021-07-23 8:09 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang
When the guest memory is hotplugged, the vhost application which
enables DMA acceleration must stop DMA transfers before the vhost
re-maps the guest memory.
To accomplish that, we need to make the following changes in the vhost sample:
1. add inflight packets count.
2. add vring_state_changed() callback.
3. add inflight packets clear process in destroy_device() and
vring_state_changed().
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
examples/vhost/main.c | 55 +++++++++++++++++++++++++++++++++++++++++--
examples/vhost/main.h | 1 +
2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 9cd855a696..bc3d71c898 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,8 +851,11 @@ complete_async_pkts(struct vhost_dev *vdev)
complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
- if (complete_count)
+ if (complete_count) {
free_pkts(p_cpl, complete_count);
+ __atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
+ }
+
}
static __rte_always_inline void
@@ -895,6 +898,7 @@ drain_vhost(struct vhost_dev *vdev)
complete_async_pkts(vdev);
ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1226,6 +1230,9 @@ drain_eth_rx(struct vhost_dev *vdev)
enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
VIRTIO_RXQ, pkts, rx_count,
m_cpu_cpl, &cpu_cpl_nr);
+ __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
+ __ATOMIC_SEQ_CST);
+
if (cpu_cpl_nr)
free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1397,8 +1404,19 @@ destroy_device(int vid)
"(%d) device has been removed from data core\n",
vdev->vid);
- if (async_vhost_driver)
+ if (async_vhost_driver) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+
rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+ }
rte_free(vdev);
}
@@ -1487,6 +1505,38 @@ new_device(int vid)
return 0;
}
+static int
+vring_state_changed(int vid, uint16_t queue_id, int enable)
+{
+ struct vhost_dev *vdev = NULL;
+
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return -1;
+
+ if (queue_id != VIRTIO_RXQ)
+ return 0;
+
+ if (async_vhost_driver) {
+ if (!enable) {
+ uint16_t n_pkt = 0;
+ struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+
+ while (vdev->pkts_inflight) {
+ n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
+ m_cpl, vdev->pkts_inflight);
+ free_pkts(m_cpl, n_pkt);
+ __atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
+ }
+ }
+ }
+
+ return 0;
+}
+
/*
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
@@ -1495,6 +1545,7 @@ static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
};
/*
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 0ccdce4b4a..e7b1ac60a6 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,6 +51,7 @@ struct vhost_dev {
uint64_t features;
size_t hdr_len;
uint16_t nr_vrings;
+ uint16_t pkts_inflight;
struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
--
2.29.2
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost
2021-07-23 8:03 ` Xia, Chenbo
@ 2021-07-23 8:57 ` Thomas Monjalon
0 siblings, 0 replies; 70+ messages in thread
From: Thomas Monjalon @ 2021-07-23 8:57 UTC (permalink / raw)
To: maxime.coquelin, Xia, Chenbo
Cc: Jiang, Cheng1, dev, Hu, Jiayu, Yang, YvonneX, david.marchand,
Yigit, Ferruh
23/07/2021 10:03, Xia, Chenbo:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 23/07/2021 09:34, Xia, Chenbo:
> > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > 23/07/2021 07:06, Xia, Chenbo:
> > > > > From: Thomas Monjalon <thomas@monjalon.net>
> > > > Read the comment at the beginning of the section, it explains
> > > > how things must be sorted:
> > > >
> > > > Suggested order in release notes items:
> > > > * Core libs (EAL, mempool, ring, mbuf, buses)
> > > > * Device abstraction libs and PMDs (ordered alphabetically by vendor
> > name)
> > > > - ethdev (lib, PMDs)
> > > > - cryptodev (lib, PMDs)
> > > > - eventdev (lib, PMDs)
> > > > - etc
> > > > * Other libs
> > > > * Apps, Examples, Tools (if significant)
> > > >
> > > > vhost is usually at the end of ethdev PMDs.
> > >
> > > Oops.. I should notice it..
>
> I want to make this clear. The release note is a vhost lib update which belongs
> to '* Other libs'. Since there are no app/example/tool changes, it just happens to be
> the last item.
vhost is an exception.
Most of the time it is about networking vhost,
so it fits better at the end of ethdev lib and PMDs in my opinion.
In the case there are net+crypto changes,
it can still be between ethdev and crypto.
^ permalink raw reply [flat|nested] 70+ messages in thread
* Re: [dpdk-dev] [PATCH v8 0/4] vhost: handle memory hotplug for async vhost
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
` (3 preceding siblings ...)
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 4/4] examples/vhost: " Cheng Jiang
@ 2021-07-23 9:08 ` Xia, Chenbo
4 siblings, 0 replies; 70+ messages in thread
From: Xia, Chenbo @ 2021-07-23 9:08 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin
Cc: dev, Hu, Jiayu, Yang, YvonneX, Thomas Monjalon
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Friday, July 23, 2021 4:10 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v8 0/4] vhost: handle memory hotplug for async vhost
>
> When the guest memory is hotplugged, the vhost application which
> enables DMA acceleration must stop DMA transfers before the vhost
> re-maps the guest memory.
>
> This patch set is to provide an unsafe API to drain inflight pkts
> which are submitted to DMA engine in vhost async data path, and
> notify the vhost application of stopping DMA transfers. And enable it
> in vhost example.
>
> v8:
> * updated doc in the code patch
> * fix a compile error in cross-compilation
> v7:
> * rebased on the latest codes
> * improved commit log
> v6:
> * removed unnecessary args for the new API
> * improved variable names and function names
> * added enable notification in set_mem_table
> * fixed vhost example queue clear process
> v5:
> * added fixes in 'vhost: fix async vhost ops return type'
> * improved git log, variable names and logs
> v4:
> * rebased on the latest codes
> v3:
> * added a patch to fix async ops return type
> * fixed async ops fail handler
> * updated the doc
> v2:
> * changed the patch structure
>
> Cheng Jiang (3):
> vhost: fix async vhost ops return type
> vhost: add unsafe API to clear packets in async vhost
> examples/vhost: handle memory hotplug for async vhost
>
> Jiayu Hu (1):
> vhost: handle memory hotplug for async vhost
>
> doc/guides/prog_guide/vhost_lib.rst | 5 +
> doc/guides/rel_notes/release_21_08.rst | 5 +
> examples/vhost/ioat.c | 4 +-
> examples/vhost/ioat.h | 8 +-
> examples/vhost/main.c | 55 ++++++++-
> examples/vhost/main.h | 1 +
> lib/vhost/rte_vhost_async.h | 30 ++++-
> lib/vhost/version.map | 1 +
> lib/vhost/vhost_user.c | 16 +++
> lib/vhost/virtio_net.c | 152 ++++++++++++++++++++-----
> 10 files changed, 234 insertions(+), 43 deletions(-)
>
> --
> 2.29.2
Series applied to next-virtio/main with the release doc fixed as Thomas suggested.
Thanks.
^ permalink raw reply [flat|nested] 70+ messages in thread
end of thread, other threads:[~2021-07-23 9:08 UTC | newest]
Thread overview: 70+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-06-07 13:46 ` Maxime Coquelin
2021-06-08 5:26 ` Jiang, Cheng1
2021-06-02 4:28 ` [dpdk-dev] [PATCH 2/2] vhost: handle memory hotplug for " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-07-05 14:58 ` Pai G, Sunil
2021-07-07 14:02 ` Jiang, Cheng1
2021-07-08 7:15 ` Pai G, Sunil
2021-07-12 6:31 ` Jiang, Cheng1
2021-07-06 14:08 ` Maxime Coquelin
2021-07-08 13:46 ` Hu, Jiayu
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 2/3] examples/vhost: handle memory hotplug for " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 3/3] vhost: " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 4/5] examples/vhost: " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 5:36 ` Xia, Chenbo
2021-07-16 5:58 ` Jiang, Cheng1
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
2021-07-16 8:56 ` Xia, Chenbo
2021-07-19 3:28 ` Jiang, Cheng1
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 4/5] examples/vhost: " Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-19 5:19 ` Xia, Chenbo
2021-07-19 7:56 ` Hu, Jiayu
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 4/5] examples/vhost: " Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-21 14:20 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-21 14:23 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-21 14:32 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 4/5] examples/vhost: " Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 4/5] examples/vhost: " Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 5/5] doc: update doc for queue clear API in vhost lib Cheng Jiang
2021-07-22 5:07 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Xia, Chenbo
2021-07-22 16:12 ` Thomas Monjalon
2021-07-23 5:06 ` Xia, Chenbo
2021-07-23 7:25 ` Thomas Monjalon
2021-07-23 7:34 ` Xia, Chenbo
2021-07-23 7:39 ` Thomas Monjalon
2021-07-23 8:03 ` Xia, Chenbo
2021-07-23 8:57 ` Thomas Monjalon
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 3/4] vhost: handle memory hotplug for " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 4/4] examples/vhost: " Cheng Jiang
2021-07-23 9:08 ` [dpdk-dev] [PATCH v8 0/4] vhost: " Xia, Chenbo
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).