From: Maxime Coquelin
To: Zhihong Wang, dev@dpdk.org
Cc: yuanhan.liu@linux.intel.com, thomas.monjalon@6wind.com
Subject: Re: [dpdk-dev] [PATCH v5 5/6] vhost: batch update used ring
Date: Mon, 12 Sep 2016 17:45:40 +0200
Message-ID: <473ef253-86bf-9a7a-d028-21c27690a421@redhat.com>
In-Reply-To: <1473392368-84903-6-git-send-email-zhihong.wang@intel.com>
References: <1471319402-112998-1-git-send-email-zhihong.wang@intel.com>
 <1473392368-84903-1-git-send-email-zhihong.wang@intel.com>
 <1473392368-84903-6-git-send-email-zhihong.wang@intel.com>

On 09/09/2016 05:39 AM, Zhihong Wang wrote:
> This patch enables batch update of the used ring for better efficiency.
>
> Signed-off-by: Zhihong Wang
> ---
> Changes in v4:
>
> 1. Free shadow used ring in the right place.
>
> 2. Add failure check for shadow used ring malloc.
>
>  lib/librte_vhost/vhost.c      | 20 ++++++++++++--
>  lib/librte_vhost/vhost.h      |  4 +++
>  lib/librte_vhost/vhost_user.c | 31 +++++++++++++++++----
>  lib/librte_vhost/virtio_net.c | 64 +++++++++++++++++++++++++++++++++++--------
>  4 files changed, 101 insertions(+), 18 deletions(-)
>
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 46095c3..cb31cdd 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -119,10 +119,26 @@ cleanup_device(struct virtio_net *dev, int destroy)
>  static void
>  free_device(struct virtio_net *dev)
>  {
> +	struct vhost_virtqueue *vq_0;
> +	struct vhost_virtqueue *vq_1;
>  	uint32_t i;
>
> -	for (i = 0; i < dev->virt_qp_nb; i++)
> -		rte_free(dev->virtqueue[i * VIRTIO_QNUM]);
> +	for (i = 0; i < dev->virt_qp_nb; i++) {
> +		vq_0 = dev->virtqueue[i * VIRTIO_QNUM];
> +		if (vq_0->shadow_used_ring) {
> +			rte_free(vq_0->shadow_used_ring);
> +			vq_0->shadow_used_ring = NULL;
> +		}
> +
> +		vq_1 = dev->virtqueue[i * VIRTIO_QNUM + 1];
> +		if (vq_1->shadow_used_ring) {
> +			rte_free(vq_1->shadow_used_ring);
> +			vq_1->shadow_used_ring = NULL;
> +		}
> +
> +		/* malloc together, free together */
> +		rte_free(vq_0);
> +	}
>
>  	rte_free(dev);
>  }
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 9707dfc..381dc27 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -85,6 +85,10 @@ struct vhost_virtqueue {
>
>  	/* Physical address of used ring, for logging */
>  	uint64_t log_guest_addr;
> +
> +	/* Shadow used ring for performance */
> +	struct vring_used_elem *shadow_used_ring;
> +	uint32_t shadow_used_idx;
>  } __rte_cache_aligned;
>
>  /* Old kernels have no such macro defined */
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index eee99e9..d7cf1ed 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -193,7 +193,21 @@ static int
>  vhost_user_set_vring_num(struct virtio_net *dev,
>  			 struct vhost_vring_state *state)
>  {
> -	dev->virtqueue[state->index]->size = state->num;
> +	struct vhost_virtqueue *vq;
> +
> +	vq = dev->virtqueue[state->index];
> +	vq->size = state->num;
> +	if (!vq->shadow_used_ring) {
> +		vq->shadow_used_ring = rte_malloc(NULL,
> +				vq->size * sizeof(struct vring_used_elem),
> +				RTE_CACHE_LINE_SIZE);
> +		if (!vq->shadow_used_ring) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +				"Failed to allocate memory"
> +				" for shadow used ring.\n");
> +			return -1;
> +		}
> +	}
>
>  	return 0;
>  }
> @@ -611,14 +625,21 @@ static int
>  vhost_user_get_vring_base(struct virtio_net *dev,
>  			  struct vhost_vring_state *state)
>  {
> +	struct vhost_virtqueue *vq;
> +
>  	/* We have to stop the queue (virtio) if it is running. */
>  	if (dev->flags & VIRTIO_DEV_RUNNING) {
>  		dev->flags &= ~VIRTIO_DEV_RUNNING;
>  		notify_ops->destroy_device(dev->vid);
>  	}
>
> +	vq = dev->virtqueue[state->index];
>  	/* Here we are safe to get the last used index */
> -	state->num = dev->virtqueue[state->index]->last_used_idx;
> +	state->num = vq->last_used_idx;
> +	if (vq->shadow_used_ring) {
> +		rte_free(vq->shadow_used_ring);
> +		vq->shadow_used_ring = NULL;
> +	}
>
>  	RTE_LOG(INFO, VHOST_CONFIG,
>  		"vring base idx:%d file:%d\n", state->index, state->num);
> @@ -627,10 +648,10 @@ vhost_user_get_vring_base(struct virtio_net *dev,
>  	 * sent and only sent in vhost_vring_stop.
>  	 * TODO: cleanup the vring, it isn't usable since here.
>  	 */
> -	if (dev->virtqueue[state->index]->kickfd >= 0)
> -		close(dev->virtqueue[state->index]->kickfd);
> +	if (vq->kickfd >= 0)
> +		close(vq->kickfd);
>
> -	dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
> +	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
>
>  	return 0;
>  }
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index b38f18f..e9f6353 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -134,17 +134,52 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
>  }
>
>  static inline void __attribute__((always_inline))
> -update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
> -		uint32_t desc_chain_head, uint32_t desc_chain_len)
> +update_used_ring(struct vhost_virtqueue *vq, uint32_t desc_chain_head,
> +		uint32_t desc_chain_len)
>  {
> -	uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
> -
> -	vq->used->ring[used_idx].id = desc_chain_head;
> -	vq->used->ring[used_idx].len = desc_chain_len;
> +	vq->shadow_used_ring[vq->shadow_used_idx].id = desc_chain_head;
> +	vq->shadow_used_ring[vq->shadow_used_idx].len = desc_chain_len;
> +	vq->shadow_used_idx++;
>  	vq->last_used_idx++;
> -	vhost_log_used_vring(dev, vq, offsetof(struct vring_used,
> -			ring[used_idx]),
> -			sizeof(vq->used->ring[used_idx]));
> +}
> +
> +static inline void __attribute__((always_inline))
> +flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +		uint32_t used_idx_start)
> +{
> +	if (used_idx_start + vq->shadow_used_idx < vq->size) {
> +		rte_memcpy(&vq->used->ring[used_idx_start],
> +				&vq->shadow_used_ring[0],
> +				vq->shadow_used_idx *
> +				sizeof(struct vring_used_elem));
> +		vhost_log_used_vring(dev, vq,
> +				offsetof(struct vring_used,
> +					ring[used_idx_start]),
> +				vq->shadow_used_idx *
> +				sizeof(struct vring_used_elem));
> +	} else {
> +		uint32_t part_1 = vq->size - used_idx_start;
> +		uint32_t part_2 = vq->shadow_used_idx - part_1;
> +
> +		rte_memcpy(&vq->used->ring[used_idx_start],
> +				&vq->shadow_used_ring[0],
> +				part_1 *
> +				sizeof(struct vring_used_elem));
> +		vhost_log_used_vring(dev, vq,
> +				offsetof(struct vring_used,
> +					ring[used_idx_start]),
> +				part_1 *
> +				sizeof(struct vring_used_elem));
> +		rte_memcpy(&vq->used->ring[0],
> +				&vq->shadow_used_ring[part_1],
> +				part_2 *
> +				sizeof(struct vring_used_elem));
> +		vhost_log_used_vring(dev, vq,
> +				offsetof(struct vring_used,
> +					ring[0]),
> +				part_2 *
> +				sizeof(struct vring_used_elem));
> +	}
> +}

Is expanding the code done for performance purposes?
Or maybe we could have a loop to do that?

Something like this (not compiled, not tested):

static inline void __attribute__((always_inline))
flush_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint32_t used_idx_start)
{
	uint32_t to = used_idx_start;
	uint32_t from = 0;
	uint32_t count;

	if (used_idx_start + vq->shadow_used_idx < vq->size)
		count = vq->shadow_used_idx;
	else
		count = vq->size - used_idx_start;

	do {
		rte_memcpy(&vq->used->ring[to],
				&vq->shadow_used_ring[from],
				count * sizeof(struct vring_used_elem));
		vhost_log_used_vring(dev, vq,
				offsetof(struct vring_used, ring[to]),
				count * sizeof(struct vring_used_elem));

		to = (to + count) & (vq->size - 1);
		from += count;
		count = vq->shadow_used_idx - count;
	} while (count);
}

Regards,
Maxime
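Both versions above implement the same flush half of the batching scheme: entries accumulated in the per-queue shadow ring during a burst are copied into the guest-visible used ring in at most two contiguous chunks (two when the copy wraps past the end of the ring). What the quoted diff does not show is the final step that publishes the batch to the guest. The sketch below illustrates that step, assuming the field names from the patch and the usual vring ordering rules; the helper name flush_and_publish, its call site, and the barrier placement are illustrative, not taken from the patch:

static inline void
flush_and_publish(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint32_t used_idx_start)
{
	if (vq->shadow_used_idx == 0)
		return;

	/* Copy the accumulated shadow entries into the used ring;
	 * either version of flush_used_ring() above works here. */
	flush_used_ring(dev, vq, used_idx_start);

	/* The new ring entries must be visible to the guest before
	 * the index update that publishes them. */
	rte_smp_wmb();
	*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
			sizeof(vq->used->idx));
	vq->shadow_used_idx = 0;
}

This is where the batching pays off: the write barrier, the index store, and the dirty-page logging of the index run once per burst instead of once per descriptor chain, and the ring entries themselves are written with at most two rte_memcpy() calls.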