From: Jiayu Hu <jiayu.hu@intel.com>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, chenbo.xia@intel.com,
cheng1.jiang@intel.com, xingguang.he@intel.com,
Jiayu Hu <jiayu.hu@intel.com>,
stable@dpdk.org
Subject: [dpdk-dev] [PATCH] vhost: fix packed ring descriptor update in async enqueue
Date: Thu, 4 Nov 2021 06:19:30 -0400 [thread overview]
Message-ID: <1636021170-230805-1-git-send-email-jiayu.hu@intel.com> (raw)
For a packet that spans multiple descriptors, the flag of the
first descriptor must not be updated until the flags of all the
remaining descriptors have been updated. However, when the
packet's descriptor indexes wrap around the ring, the first
descriptor's flag was updated before the others.
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org
Signed-off-by: Jiayu Hu <jiayu.hu@intel.com>
---
lib/vhost/virtio_net.c | 122 ++++++++++++++++++-----------------------
1 file changed, 54 insertions(+), 68 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index cef4bcf15c..b3d954aab4 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1549,60 +1549,6 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
return pkt_idx;
}
-static __rte_always_inline void
-vhost_update_used_packed(struct vhost_virtqueue *vq,
- struct vring_used_elem_packed *shadow_ring,
- uint16_t count)
-{
- int i;
- uint16_t used_idx = vq->last_used_idx;
- uint16_t head_idx = vq->last_used_idx;
- uint16_t head_flags = 0;
-
- if (count == 0)
- return;
-
- /* Split loop in two to save memory barriers */
- for (i = 0; i < count; i++) {
- vq->desc_packed[used_idx].id = shadow_ring[i].id;
- vq->desc_packed[used_idx].len = shadow_ring[i].len;
-
- used_idx += shadow_ring[i].count;
- if (used_idx >= vq->size)
- used_idx -= vq->size;
- }
-
- /* The ordering for storing desc flags needs to be enforced. */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
-
- for (i = 0; i < count; i++) {
- uint16_t flags;
-
- if (vq->shadow_used_packed[i].len)
- flags = VRING_DESC_F_WRITE;
- else
- flags = 0;
-
- if (vq->used_wrap_counter) {
- flags |= VRING_DESC_F_USED;
- flags |= VRING_DESC_F_AVAIL;
- } else {
- flags &= ~VRING_DESC_F_USED;
- flags &= ~VRING_DESC_F_AVAIL;
- }
-
- if (i > 0) {
- vq->desc_packed[vq->last_used_idx].flags = flags;
- } else {
- head_idx = vq->last_used_idx;
- head_flags = flags;
- }
-
- vq_inc_last_used_packed(vq, shadow_ring[i].count);
- }
-
- vq->desc_packed[head_idx].flags = head_flags;
-}
static __rte_always_inline int
vhost_enqueue_async_packed(struct virtio_net *dev,
@@ -1819,23 +1765,63 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
{
struct vhost_async *async = vq->async;
- uint16_t nr_left = n_buffers;
- uint16_t from, to;
+ uint16_t from = async->last_buffer_idx_packed;
+ uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
+ uint16_t i;
- do {
- from = async->last_buffer_idx_packed;
- to = (from + nr_left) % vq->size;
- if (to > from) {
- vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
- async->last_buffer_idx_packed += nr_left;
- nr_left = 0;
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < n_buffers; i++) {
+ vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
+ vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
+
+ used_idx += async->buffers_packed[from].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+
+ from++;
+ if (from >= vq->size)
+ from = 0;
+ }
+
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+ from = async->last_buffer_idx_packed;
+
+ for (i = 0; i < n_buffers; i++) {
+ uint16_t flags;
+
+ if (async->buffers_packed[from].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
} else {
- vhost_update_used_packed(vq, async->buffers_packed + from,
- vq->size - from);
- async->last_buffer_idx_packed = 0;
- nr_left -= vq->size - from;
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
}
- } while (nr_left > 0);
+
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
+
+ from++;
+ if (from == vq->size)
+ from = 0;
+ }
+
+ vq->desc_packed[head_idx].flags = head_flags;
+ async->last_buffer_idx_packed = from;
}
static __rte_always_inline uint16_t
--
2.25.1
next reply other threads:[~2021-11-04 3:55 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-04 10:19 Jiayu Hu [this message]
2021-11-08 8:42 ` Maxime Coquelin
2021-11-10 12:40 ` [dpdk-dev] [PATCH v2] " Jiayu Hu
2021-11-16 6:27 ` Xia, Chenbo
2021-11-16 15:17 ` [PATCH v3] " Jiayu Hu
2021-11-16 9:59 ` Maxime Coquelin
2021-11-16 10:25 ` Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1636021170-230805-1-git-send-email-jiayu.hu@intel.com \
--to=jiayu.hu@intel.com \
--cc=chenbo.xia@intel.com \
--cc=cheng1.jiang@intel.com \
--cc=dev@dpdk.org \
--cc=maxime.coquelin@redhat.com \
--cc=stable@dpdk.org \
--cc=xingguang.he@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).