From: Cheng Jiang <cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, Chenbo.Xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,
Cheng Jiang <cheng1.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/5] vhost: add unsafe API to drain pkts in async vhost
Date: Wed, 14 Jul 2021 09:01:06 +0000 [thread overview]
Message-ID: <20210714090109.18523-3-cheng1.jiang@intel.com> (raw)
In-Reply-To: <20210714090109.18523-1-cheng1.jiang@intel.com>
Applications need to stop DMA transfers and finish all the in-flight
pkts when in VM memory hot-plug case and async vhost is used. This
patch is to provide an unsafe API to drain in-flight pkts which are
submitted to DMA engine in vhost async data path.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
lib/vhost/rte_vhost_async.h | 24 ++++++++++
lib/vhost/version.map | 3 ++
lib/vhost/virtio_net.c | 90 +++++++++++++++++++++++++++----------
3 files changed, 94 insertions(+), 23 deletions(-)
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index bc81cd0caa..fd622631b2 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -193,4 +193,28 @@ __rte_experimental
uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function checks async completion status and clears all packets
+ * for a specific vhost device queue. Packets which are in-flight will
+ * be returned in an array.
+ *
+ * @note This function does not perform any locking
+ *
+ * @param vid
+ * id of vhost device to enqueue data
+ * @param queue_id
+ * queue id to enqueue data
+ * @param pkts
+ * blank array to get return packet pointer
+ * @param count
+ * size of the packet array
+ * @param times
+ * max number of poll attempts
+ * @return
+ * num of packets returned
+ */
+__rte_experimental
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count, uint16_t times);
+
#endif /* _RTE_VHOST_ASYNC_H_ */
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 9103a23cd4..b8fc8770dd 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -79,4 +79,7 @@ EXPERIMENTAL {
# added in 21.05
rte_vhost_get_negotiated_protocol_features;
+
+ # added in 21.08
+ rte_vhost_try_drain_queue_thread_unsafe;
};
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8156796a46..9f541679b9 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2115,10 +2115,10 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
@@ -2126,26 +2126,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
uint16_t from, i;
int32_t n_poll;
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
@@ -2153,7 +2135,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq_size, vq->async_pkts_inflight_n);
if (count > vq->async_last_pkts_n) {
- n_poll = vq->async_ops.check_completed_copies(vid,
+ n_poll = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
if (n_poll >= 0) {
n_pkts_cpl = n_poll;
@@ -2168,7 +2150,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
@@ -2207,12 +2189,74 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->last_async_desc_idx_split += n_descs;
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_put = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_put = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
}
+uint16_t rte_vhost_try_drain_queue_thread_unsafe(int vid, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint16_t count, uint16_t times)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq;
+	uint16_t n_cpl = 0;
+
+	if (!dev)
+		return 0;
+
+	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(!vq->async_registered)) {
+		VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+	/* Poll until 'count' pkts are drained or 'times' attempts are used up;
+	 * request only the remaining slots so pkts[] cannot be overrun. */
+	while ((n_cpl < count) && times--)
+		n_cpl += vhost_poll_enqueue_completed(dev, queue_id, pkts + n_cpl, count - n_cpl);
+
+	return n_cpl;
+}
+
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count,
--
2.29.2
next prev parent reply other threads:[~2021-07-14 9:17 UTC|newest]
Thread overview: 70+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-06-02 4:28 [dpdk-dev] [PATCH 0/2] vhost: handle memory hotplug for " Cheng Jiang
2021-06-02 4:28 ` [dpdk-dev] [PATCH 1/2] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-06-07 13:46 ` Maxime Coquelin
2021-06-08 5:26 ` Jiang, Cheng1
2021-06-02 4:28 ` [dpdk-dev] [PATCH 2/2] vhost: handle memory hotplug for " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 0/3] " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 1/3] vhost: add unsafe API to drain pkts in " Cheng Jiang
2021-07-05 14:58 ` Pai G, Sunil
2021-07-07 14:02 ` Jiang, Cheng1
2021-07-08 7:15 ` Pai G, Sunil
2021-07-12 6:31 ` Jiang, Cheng1
2021-07-06 14:08 ` Maxime Coquelin
2021-07-08 13:46 ` Hu, Jiayu
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 2/3] examples/vhost: handle memory hotplug for " Cheng Jiang
2021-06-15 14:15 ` [dpdk-dev] [PATCH v2 3/3] vhost: " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 0/5] " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-14 9:01 ` Cheng Jiang [this message]
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 3/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 4/5] examples/vhost: " Cheng Jiang
2021-07-14 9:01 ` [dpdk-dev] [PATCH v3 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 5:36 ` Xia, Chenbo
2021-07-16 5:58 ` Jiang, Cheng1
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
2021-07-16 8:56 ` Xia, Chenbo
2021-07-19 3:28 ` Jiang, Cheng1
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 4/5] examples/vhost: " Cheng Jiang
2021-07-16 2:59 ` [dpdk-dev] [PATCH v4 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 2/5] vhost: add unsafe API to drain pkts in async vhost Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-19 5:19 ` Xia, Chenbo
2021-07-19 7:56 ` Hu, Jiayu
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 4/5] examples/vhost: " Cheng Jiang
2021-07-16 7:24 ` [dpdk-dev] [PATCH v5 5/5] doc: update doc for try drain API in vhost lib Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-21 14:20 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-21 14:23 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-21 14:32 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 4/5] examples/vhost: " Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
2021-07-19 8:10 ` [dpdk-dev] [PATCH v6 5/5] doc: update doc for inflight packets clear API in vhost lib Cheng Jiang
2021-07-21 14:37 ` Maxime Coquelin
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 1/5] vhost: fix async vhost ops return type Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 2/5] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 3/5] vhost: handle memory hotplug for " Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 4/5] examples/vhost: " Cheng Jiang
2021-07-22 4:09 ` [dpdk-dev] [PATCH v7 5/5] doc: update doc for queue clear API in vhost lib Cheng Jiang
2021-07-22 5:07 ` [dpdk-dev] [PATCH v7 0/5] vhost: handle memory hotplug for async vhost Xia, Chenbo
2021-07-22 16:12 ` Thomas Monjalon
2021-07-23 5:06 ` Xia, Chenbo
2021-07-23 7:25 ` Thomas Monjalon
2021-07-23 7:34 ` Xia, Chenbo
2021-07-23 7:39 ` Thomas Monjalon
2021-07-23 8:03 ` Xia, Chenbo
2021-07-23 8:57 ` Thomas Monjalon
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 0/4] " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 1/4] vhost: fix async vhost ops return type Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 2/4] vhost: add unsafe API to clear packets in async vhost Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 3/4] vhost: handle memory hotplug for " Cheng Jiang
2021-07-23 8:09 ` [dpdk-dev] [PATCH v8 4/4] examples/vhost: " Cheng Jiang
2021-07-23 9:08 ` [dpdk-dev] [PATCH v8 0/4] vhost: " Xia, Chenbo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210714090109.18523-3-cheng1.jiang@intel.com \
--to=cheng1.jiang@intel.com \
--cc=Chenbo.Xia@intel.com \
--cc=dev@dpdk.org \
--cc=jiayu.hu@intel.com \
--cc=maxime.coquelin@redhat.com \
--cc=yvonnex.yang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).