From: "Wang, Zhihong" <zhihong.wang@intel.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>,
"dev@dpdk.org" <dev@dpdk.org>
Cc: "yuanhan.liu@linux.intel.com" <yuanhan.liu@linux.intel.com>,
"thomas.monjalon@6wind.com" <thomas.monjalon@6wind.com>
Subject: Re: [dpdk-dev] [PATCH v5 5/6] vhost: batch update used ring
Date: Wed, 14 Sep 2016 08:43:30 +0000 [thread overview]
Message-ID: <8F6C2BD409508844A0EFC19955BE09414E70FB6A@SHSMSX103.ccr.corp.intel.com> (raw)
In-Reply-To: <473ef253-86bf-9a7a-d028-21c27690a421@redhat.com>
> -----Original Message-----
> From: Maxime Coquelin [mailto:maxime.coquelin@redhat.com]
> Sent: Monday, September 12, 2016 11:46 PM
> To: Wang, Zhihong <zhihong.wang@intel.com>; dev@dpdk.org
> Cc: yuanhan.liu@linux.intel.com; thomas.monjalon@6wind.com
> Subject: Re: [PATCH v5 5/6] vhost: batch update used ring
>
>
>
> On 09/09/2016 05:39 AM, Zhihong Wang wrote:
> > This patch enables batch update of the used ring for better efficiency.
> >
> > Signed-off-by: Zhihong Wang <zhihong.wang@intel.com>
> > ---
> > Changes in v4:
> >
> > 1. Free shadow used ring in the right place.
> >
> > 2. Add failure check for shadow used ring malloc.
> >
> > lib/librte_vhost/vhost.c | 20 ++++++++++++--
> > lib/librte_vhost/vhost.h | 4 +++
> > lib/librte_vhost/vhost_user.c | 31 +++++++++++++++++----
> > lib/librte_vhost/virtio_net.c | 64
> +++++++++++++++++++++++++++++++++++--------
> > 4 files changed, 101 insertions(+), 18 deletions(-)
> >
> > diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> > index 46095c3..cb31cdd 100644
> > --- a/lib/librte_vhost/vhost.c
> > +++ b/lib/librte_vhost/vhost.c
> > @@ -119,10 +119,26 @@ cleanup_device(struct virtio_net *dev, int
> destroy)
> > static void
> > free_device(struct virtio_net *dev)
> > {
> > + struct vhost_virtqueue *vq_0;
> > + struct vhost_virtqueue *vq_1;
> > uint32_t i;
> >
> > - for (i = 0; i < dev->virt_qp_nb; i++)
> > - rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
> > + for (i = 0; i < dev->virt_qp_nb; i++) {
> > + vq_0 = dev->virtqueue[i * VIRTIO_QNUM];
> > + if (vq_0->shadow_used_ring) {
> > + rte_free(vq_0->shadow_used_ring);
> > + vq_0->shadow_used_ring = NULL;
> > + }
> > +
> > + vq_1 = dev->virtqueue[i * VIRTIO_QNUM + 1];
> > + if (vq_1->shadow_used_ring) {
> > + rte_free(vq_1->shadow_used_ring);
> > + vq_1->shadow_used_ring = NULL;
> > + }
> > +
> > + /* malloc together, free together */
> > + rte_free(vq_0);
> > + }
> >
> > rte_free(dev);
> > }
> > diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> > index 9707dfc..381dc27 100644
> > --- a/lib/librte_vhost/vhost.h
> > +++ b/lib/librte_vhost/vhost.h
> > @@ -85,6 +85,10 @@ struct vhost_virtqueue {
> >
> > /* Physical address of used ring, for logging */
> > uint64_t log_guest_addr;
> > +
> > + /* Shadow used ring for performance */
> > + struct vring_used_elem *shadow_used_ring;
> > + uint32_t shadow_used_idx;
> > } __rte_cache_aligned;
> >
> > /* Old kernels have no such macro defined */
> > diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> > index eee99e9..d7cf1ed 100644
> > --- a/lib/librte_vhost/vhost_user.c
> > +++ b/lib/librte_vhost/vhost_user.c
> > @@ -193,7 +193,21 @@ static int
> > vhost_user_set_vring_num(struct virtio_net *dev,
> > struct vhost_vring_state *state)
> > {
> > - dev->virtqueue[state->index]->size = state->num;
> > + struct vhost_virtqueue *vq;
> > +
> > + vq = dev->virtqueue[state->index];
> > + vq->size = state->num;
> > + if (!vq->shadow_used_ring) {
> > + vq->shadow_used_ring = rte_malloc(NULL,
> > + vq->size * sizeof(struct vring_used_elem),
> > + RTE_CACHE_LINE_SIZE);
> > + if (!vq->shadow_used_ring) {
> > + RTE_LOG(ERR, VHOST_CONFIG,
> > + "Failed to allocate memory"
> > + " for shadow used ring.\n");
> > + return -1;
> > + }
> > + }
> >
> > return 0;
> > }
> > @@ -611,14 +625,21 @@ static int
> > vhost_user_get_vring_base(struct virtio_net *dev,
> > struct vhost_vring_state *state)
> > {
> > + struct vhost_virtqueue *vq;
> > +
> > /* We have to stop the queue (virtio) if it is running. */
> > if (dev->flags & VIRTIO_DEV_RUNNING) {
> > dev->flags &= ~VIRTIO_DEV_RUNNING;
> > notify_ops->destroy_device(dev->vid);
> > }
> >
> > + vq = dev->virtqueue[state->index];
> > /* Here we are safe to get the last used index */
> > - state->num = dev->virtqueue[state->index]->last_used_idx;
> > + state->num = vq->last_used_idx;
> > + if (vq->shadow_used_ring) {
> > + rte_free(vq->shadow_used_ring);
> > + vq->shadow_used_ring = NULL;
> > + }
> >
> > RTE_LOG(INFO, VHOST_CONFIG,
> > "vring base idx:%d file:%d\n", state->index, state->num);
> > @@ -627,10 +648,10 @@ vhost_user_get_vring_base(struct virtio_net
> *dev,
> > * sent and only sent in vhost_vring_stop.
> > * TODO: cleanup the vring, it isn't usable since here.
> > */
> > - if (dev->virtqueue[state->index]->kickfd >= 0)
> > - close(dev->virtqueue[state->index]->kickfd);
> > + if (vq->kickfd >= 0)
> > + close(vq->kickfd);
> >
> > - dev->virtqueue[state->index]->kickfd =
> VIRTIO_UNINITIALIZED_EVENTFD;
> > + vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> >
> > return 0;
> > }
> > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> > index b38f18f..e9f6353 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -134,17 +134,52 @@ virtio_enqueue_offload(struct rte_mbuf
> *m_buf, struct virtio_net_hdr *net_hdr)
> > }
> >
> > static inline void __attribute__((always_inline))
> > -update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
> > - uint32_t desc_chain_head, uint32_t desc_chain_len)
> > +update_used_ring(struct vhost_virtqueue *vq, uint32_t
> desc_chain_head,
> > + uint32_t desc_chain_len)
> > {
> > - uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
> > -
> > - vq->used->ring[used_idx].id = desc_chain_head;
> > - vq->used->ring[used_idx].len = desc_chain_len;
> > + vq->shadow_used_ring[vq->shadow_used_idx].id =
> desc_chain_head;
> > + vq->shadow_used_ring[vq->shadow_used_idx].len =
> desc_chain_len;
> > + vq->shadow_used_idx++;
> > vq->last_used_idx++;
> > - vhost_log_used_vring(dev, vq, offsetof(struct vring_used,
> > - ring[used_idx]),
> > - sizeof(vq->used->ring[used_idx]));
> > +}
> > +
> > +static inline void __attribute__((always_inline))
> > +flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
> > + uint32_t used_idx_start)
> > +{
> > + if (used_idx_start + vq->shadow_used_idx < vq->size) {
> > + rte_memcpy(&vq->used->ring[used_idx_start],
> > + &vq->shadow_used_ring[0],
> > + vq->shadow_used_idx *
> > + sizeof(struct vring_used_elem));
> > + vhost_log_used_vring(dev, vq,
> > + offsetof(struct vring_used,
> > + ring[used_idx_start]),
> > + vq->shadow_used_idx *
> > + sizeof(struct vring_used_elem));
> > + } else {
> > + uint32_t part_1 = vq->size - used_idx_start;
> > + uint32_t part_2 = vq->shadow_used_idx - part_1;
> > +
> > + rte_memcpy(&vq->used->ring[used_idx_start],
> > + &vq->shadow_used_ring[0],
> > + part_1 *
> > + sizeof(struct vring_used_elem));
> > + vhost_log_used_vring(dev, vq,
> > + offsetof(struct vring_used,
> > + ring[used_idx_start]),
> > + part_1 *
> > + sizeof(struct vring_used_elem));
> > + rte_memcpy(&vq->used->ring[0],
> > + &vq->shadow_used_ring[part_1],
> > + part_2 *
> > + sizeof(struct vring_used_elem));
> > + vhost_log_used_vring(dev, vq,
> > + offsetof(struct vring_used,
> > + ring[0]),
> > + part_2 *
> > + sizeof(struct vring_used_elem));
> > + }
> > }
> Is expanding the code done for performance purpose?
Hi Maxime,
Yes, theoretically this version has the fewest branches.
And I think the logic is simpler this way.
Thanks
Zhihong
> Or maybe we could have a loop to do that?
> Something like this (not compiled, not tested):
>
> static inline void __attribute__((always_inline))
> flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
> 		uint32_t used_idx_start)
> {
> uint32_t to = used_idx_start;
> uint32_t from = 0;
> uint32_t count;
>
> if (used_idx_start + vq->shadow_used_idx < vq->size)
> count = vq->shadow_used_idx;
> else
> count = vq->size - used_idx_start;
>
> do {
> rte_memcpy(&vq->used->ring[to],
> &vq->shadow_used_ring[from],
> count * sizeof(struct vring_used_elem));
> vhost_log_used_vring(dev, vq,
> offsetof(struct vring_used, ring[to]),
> count * sizeof(struct vring_used_elem));
>
> to = (to + count) & (vq->size - 1);
> from += count;
> count = vq->shadow_used_idx - count;
> } while (count);
> }
>
> Regards,
> Maxime
next prev parent reply other threads:[~2016-09-14 8:43 UTC|newest]
Thread overview: 140+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-08-16 3:50 [dpdk-dev] [PATCH] optimize vhost enqueue Zhihong Wang
2016-08-16 13:59 ` Maxime Coquelin
2016-08-17 1:45 ` Wang, Zhihong
2016-08-17 2:38 ` Yuanhan Liu
2016-08-17 6:41 ` Wang, Zhihong
2016-08-17 9:17 ` Maxime Coquelin
2016-08-17 9:51 ` Yuanhan Liu
2016-08-18 13:44 ` Wang, Zhihong
2016-08-17 10:07 ` Wang, Zhihong
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 0/6] vhost: optimize enqueue Zhihong Wang
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 1/6] vhost: rewrite enqueue Zhihong Wang
2016-08-19 2:39 ` Yuanhan Liu
2016-08-19 7:07 ` Wang, Zhihong
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 2/6] vhost: remove obsolete Zhihong Wang
2016-08-19 2:32 ` Yuanhan Liu
2016-08-19 7:08 ` Wang, Zhihong
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 3/6] vhost: remove useless volatile Zhihong Wang
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 4/6] vhost: add desc prefetch Zhihong Wang
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 5/6] vhost: batch update used ring Zhihong Wang
2016-08-18 6:33 ` [dpdk-dev] [PATCH v2 6/6] vhost: optimize cache access Zhihong Wang
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 0/5] vhost: optimize enqueue Zhihong Wang
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 1/5] vhost: rewrite enqueue Zhihong Wang
2016-08-22 9:35 ` Maxime Coquelin
2016-08-23 2:27 ` Wang, Zhihong
2016-08-25 4:00 ` Yuanhan Liu
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 2/5] vhost: remove useless volatile Zhihong Wang
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 3/5] vhost: add desc prefetch Zhihong Wang
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 4/5] vhost: batch update used ring Zhihong Wang
2016-08-25 3:48 ` Yuanhan Liu
2016-08-25 5:19 ` Wang, Zhihong
2016-08-19 5:43 ` [dpdk-dev] [PATCH v3 5/5] vhost: optimize cache access Zhihong Wang
2016-08-22 8:11 ` [dpdk-dev] [PATCH v3 0/5] vhost: optimize enqueue Maxime Coquelin
2016-08-22 10:01 ` Maxime Coquelin
2016-08-22 10:35 ` Thomas Monjalon
2016-08-24 3:37 ` Wang, Zhihong
2016-08-23 2:31 ` Wang, Zhihong
2016-08-23 10:43 ` Wang, Zhihong
2016-08-23 12:16 ` Maxime Coquelin
2016-08-23 12:22 ` Yuanhan Liu
2016-08-23 2:15 ` Wang, Zhihong
2016-09-21 8:50 ` Jianbo Liu
2016-09-21 9:27 ` Wang, Zhihong
2016-09-21 12:54 ` Jianbo Liu
2016-09-22 2:11 ` Wang, Zhihong
2016-09-22 2:29 ` Yuanhan Liu
2016-09-22 5:47 ` Jianbo Liu
2016-09-22 6:58 ` Wang, Zhihong
2016-09-22 9:01 ` Jianbo Liu
2016-09-22 10:04 ` Wang, Zhihong
2016-09-22 14:41 ` Jianbo Liu
2016-09-23 2:56 ` Wang, Zhihong
2016-09-23 10:41 ` Jianbo Liu
2016-09-23 13:41 ` Thomas Monjalon
2016-09-25 5:41 ` Wang, Zhihong
2016-09-26 5:12 ` Jianbo Liu
2016-09-26 5:25 ` Wang, Zhihong
2016-09-26 5:38 ` Jianbo Liu
2016-09-26 6:00 ` Wang, Zhihong
2016-09-26 4:24 ` Jianbo Liu
2016-09-26 5:37 ` Luke Gorrie
2016-09-26 5:40 ` Jianbo Liu
2016-09-27 10:21 ` Yuanhan Liu
2016-09-27 16:45 ` Wang, Zhihong
2016-10-09 12:09 ` Wang, Zhihong
2016-10-10 2:44 ` Yuanhan Liu
2016-10-10 5:31 ` Jianbo Liu
2016-10-10 6:22 ` Wang, Zhihong
2016-10-10 6:57 ` Jianbo Liu
2016-10-10 7:25 ` Wang, Zhihong
2016-10-12 2:53 ` Yuanhan Liu
2016-10-12 12:22 ` Wang, Zhihong
2016-10-12 15:31 ` Thomas Monjalon
2016-10-13 1:21 ` Wang, Zhihong
2016-10-13 3:51 ` Jianbo Liu
2016-10-13 5:33 ` Yuanhan Liu
2016-10-13 5:35 ` Yuanhan Liu
2016-10-13 6:02 ` Wang, Zhihong
2016-10-13 7:54 ` Maxime Coquelin
2016-10-13 9:23 ` Maxime Coquelin
2016-10-14 10:11 ` Yuanhan Liu
2016-08-30 3:35 ` [dpdk-dev] [PATCH v4 0/6] " Zhihong Wang
2016-08-30 3:35 ` [dpdk-dev] [PATCH v4 1/6] vhost: fix windows vm hang Zhihong Wang
2016-09-05 5:24 ` [dpdk-dev] [dpdk-stable] " Yuanhan Liu
2016-09-05 5:25 ` Wang, Zhihong
2016-09-05 5:40 ` Yuanhan Liu
2016-08-30 3:36 ` [dpdk-dev] [PATCH v4 2/6] vhost: rewrite enqueue Zhihong Wang
2016-09-05 6:39 ` Yuanhan Liu
2016-09-07 5:33 ` Yuanhan Liu
2016-09-07 5:39 ` Wang, Zhihong
2016-08-30 3:36 ` [dpdk-dev] [PATCH v4 3/6] vhost: remove useless volatile Zhihong Wang
2016-08-30 3:36 ` [dpdk-dev] [PATCH v4 4/6] vhost: add desc prefetch Zhihong Wang
2016-08-30 3:36 ` [dpdk-dev] [PATCH v4 5/6] vhost: batch update used ring Zhihong Wang
2016-08-30 3:36 ` [dpdk-dev] [PATCH v4 6/6] vhost: optimize cache access Zhihong Wang
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 0/6] vhost: optimize enqueue Zhihong Wang
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 1/6] vhost: fix windows vm hang Zhihong Wang
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 2/6] vhost: rewrite enqueue Zhihong Wang
2016-09-12 15:42 ` Maxime Coquelin
2016-09-14 8:20 ` Wang, Zhihong
2016-09-15 16:35 ` Maxime Coquelin
2016-09-12 16:26 ` Maxime Coquelin
2016-09-14 8:22 ` Wang, Zhihong
2016-09-18 14:19 ` Yuanhan Liu
2016-09-19 3:29 ` Wang, Zhihong
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 3/6] vhost: remove useless volatile Zhihong Wang
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 4/6] vhost: add desc prefetch Zhihong Wang
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 5/6] vhost: batch update used ring Zhihong Wang
2016-09-12 15:45 ` Maxime Coquelin
2016-09-14 8:43 ` Wang, Zhihong [this message]
2016-09-15 16:38 ` Maxime Coquelin
2016-09-18 2:55 ` Yuanhan Liu
2016-09-18 2:57 ` Wang, Zhihong
2016-09-09 3:39 ` [dpdk-dev] [PATCH v5 6/6] vhost: optimize cache access Zhihong Wang
2016-09-12 13:52 ` [dpdk-dev] [PATCH v5 0/6] vhost: optimize enqueue Maxime Coquelin
2016-09-12 13:56 ` Maxime Coquelin
2016-09-12 14:01 ` Yuanhan Liu
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 " Zhihong Wang
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 1/6] vhost: fix windows vm hang Zhihong Wang
2016-10-13 6:18 ` [dpdk-dev] [dpdk-stable] " Yuanhan Liu
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 2/6] vhost: rewrite enqueue Zhihong Wang
2016-09-22 9:58 ` Jianbo Liu
2016-09-22 10:13 ` Wang, Zhihong
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 3/6] vhost: remove useless volatile Zhihong Wang
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 4/6] vhost: add desc prefetch Zhihong Wang
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 5/6] vhost: batch update used ring Zhihong Wang
2016-09-20 2:00 ` [dpdk-dev] [PATCH v6 6/6] vhost: optimize cache access Zhihong Wang
2016-09-21 2:26 ` [dpdk-dev] [PATCH v6 0/6] vhost: optimize enqueue Yuanhan Liu
2016-09-21 4:39 ` Maxime Coquelin
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 0/7] vhost: optimize mergeable Rx path Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 1/7] vhost: remove useless volatile Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 2/7] vhost: optimize cache access Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 3/7] vhost: simplify mergeable Rx vring reservation Yuanhan Liu
2016-10-25 22:08 ` Thomas Monjalon
2016-10-26 2:56 ` Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 4/7] vhost: use last avail idx for avail ring reservation Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 5/7] vhost: shadow used ring update Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 6/7] vhost: prefetch avail ring Yuanhan Liu
2016-10-14 9:34 ` [dpdk-dev] [PATCH v7 7/7] vhost: retrieve avail head once Yuanhan Liu
2016-10-18 2:25 ` [dpdk-dev] [PATCH v7 0/7] vhost: optimize mergeable Rx path Jianbo Liu
2016-10-18 14:53 ` Maxime Coquelin
2016-10-21 7:51 ` Yuanhan Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8F6C2BD409508844A0EFC19955BE09414E70FB6A@SHSMSX103.ccr.corp.intel.com \
--to=zhihong.wang@intel.com \
--cc=dev@dpdk.org \
--cc=maxime.coquelin@redhat.com \
--cc=thomas.monjalon@6wind.com \
--cc=yuanhan.liu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).