From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, zhihong.wang@intel.com,
jfreimann@redhat.com, dev@dpdk.org
Cc: mst@redhat.com, jasowang@redhat.com, wexu@redhat.com,
Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v5 09/15] vhost: add shadow used ring support for packed rings
Date: Fri, 22 Jun 2018 15:43:21 +0200
Message-ID: <20180622134327.18973-10-maxime.coquelin@redhat.com>
In-Reply-To: <20180622134327.18973-1-maxime.coquelin@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
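Note on the mechanism, for reviewers less familiar with the packed ring
layout: used descriptors are first buffered in the shadow array
(update_shadow_used_ring_packed) and written back in one pass at flush
time. The flush fills in all id/len fields first, issues a single write
barrier, and only then sets the AVAIL/USED flag bits according to the
used wrap counter, so the guest can never observe a descriptor marked
used whose id/len are not yet visible. Splitting the loop in two means
one barrier per batch instead of one per descriptor.

The following is a minimal, self-contained sketch of that ordering only;
the demo_* names are invented for illustration, they are not part of the
vhost code added by this patch, and the barrier is modelled with a plain
compiler barrier instead of rte_smp_wmb():

#include <stdint.h>
#include <stdio.h>

#define DEMO_F_AVAIL (1 << 7)   /* stand-in for VRING_DESC_F_AVAIL */
#define DEMO_F_USED  (1 << 15)  /* stand-in for VRING_DESC_F_USED */
#define DEMO_F_WRITE (1 << 1)   /* stand-in for VRING_DESC_F_WRITE */
#define demo_wmb() __asm__ __volatile__("" ::: "memory")

struct demo_desc_packed { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; };
struct demo_shadow_elem { uint16_t id; uint32_t len; uint16_t count; };

/* Flush buffered used elements into the packed descriptor ring. */
static void
demo_flush(struct demo_desc_packed *ring, uint16_t ring_size,
	   const struct demo_shadow_elem *shadow, uint16_t nr_shadow,
	   uint16_t *last_used_idx, int *used_wrap_counter)
{
	uint16_t idx = *last_used_idx;
	uint16_t i;

	/* Pass 1: publish id/len for every buffered element. */
	for (i = 0; i < nr_shadow; i++) {
		ring[idx].id = shadow[i].id;
		ring[idx].len = shadow[i].len;
		idx = (idx + shadow[i].count) % ring_size;
	}

	demo_wmb();

	/* Pass 2: flip the flags, honouring the used wrap counter. */
	for (i = 0; i < nr_shadow; i++) {
		uint16_t flags = shadow[i].len ? DEMO_F_WRITE : 0;

		if (*used_wrap_counter)
			flags |= DEMO_F_AVAIL | DEMO_F_USED;

		ring[*last_used_idx].flags = flags;

		*last_used_idx += shadow[i].count;
		if (*last_used_idx >= ring_size) {
			*used_wrap_counter ^= 1;
			*last_used_idx -= ring_size;
		}
	}
}

int main(void)
{
	struct demo_desc_packed ring[4] = { {0, 0, 0, 0} };
	struct demo_shadow_elem shadow[2] = {
		{ .id = 0, .len = 64, .count = 1 },  /* device wrote 64 bytes */
		{ .id = 1, .len = 0,  .count = 1 },  /* read-only chain, len 0 */
	};
	uint16_t last_used = 0;
	int wrap = 1;

	demo_flush(ring, 4, shadow, 2, &last_used, &wrap);
	printf("desc0 flags=0x%x, desc1 flags=0x%x, last_used=%u wrap=%d\n",
	       ring[0].flags, ring[1].flags, last_used, wrap);
	return 0;
}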
lib/librte_vhost/vhost.c | 9 ++++--
lib/librte_vhost/vhost.h | 13 ++++++--
lib/librte_vhost/vhost_user.c | 64 +++++++++++++++++++++++++++++----------
lib/librte_vhost/virtio_net.c | 70 +++++++++++++++++++++++++++++++++++++++++--
4 files changed, 132 insertions(+), 24 deletions(-)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index a85c6646f..7cbf1eded 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -94,9 +94,12 @@ cleanup_device(struct virtio_net *dev, int destroy)
}
void
-free_vq(struct vhost_virtqueue *vq)
+free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- rte_free(vq->shadow_used_ring);
+ if (vq_is_packed(dev))
+ rte_free(vq->shadow_used_packed);
+ else
+ rte_free(vq->shadow_used_split);
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
@@ -111,7 +114,7 @@ free_device(struct virtio_net *dev)
uint32_t i;
for (i = 0; i < dev->nr_vring; i++)
- free_vq(dev->virtqueue[i]);
+ free_vq(dev, dev->virtqueue[i]);
rte_free(dev);
}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 671b4b3bf..62d49f238 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -80,6 +80,12 @@ struct log_cache_entry {
unsigned long val;
};
+struct vring_used_elem_packed {
+ uint32_t id;
+ uint32_t len;
+ uint32_t count;
+};
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
@@ -119,7 +125,10 @@ struct vhost_virtqueue {
struct zcopy_mbuf *zmbufs;
struct zcopy_mbuf_list zmbuf_list;
- struct vring_used_elem *shadow_used_ring;
+ union {
+ struct vring_used_elem *shadow_used_split;
+ struct vring_used_elem_packed *shadow_used_packed;
+ };
uint16_t shadow_used_idx;
struct vhost_vring_addr ring_addrs;
@@ -553,7 +562,7 @@ void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
-void free_vq(struct vhost_virtqueue *vq);
+void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index b6097c085..a08d99314 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -244,7 +244,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
dev->virtqueue[dev->nr_vring] = NULL;
cleanup_vq(vq, 1);
- free_vq(vq);
+ free_vq(dev, vq);
}
}
@@ -293,13 +293,26 @@ vhost_user_set_vring_num(struct virtio_net *dev,
TAILQ_INIT(&vq->zmbuf_list);
}
- vq->shadow_used_ring = rte_malloc(NULL,
+ if (vq_is_packed(dev)) {
+ vq->shadow_used_packed = rte_malloc(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE);
+ if (!vq->shadow_used_packed) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
+
+ } else {
+ vq->shadow_used_split = rte_malloc(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE);
- if (!vq->shadow_used_ring) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "failed to allocate memory for shadow used ring.\n");
- return -1;
+ if (!vq->shadow_used_split) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
}
vq->batch_copy_elems = rte_malloc(NULL,
@@ -326,7 +339,8 @@ numa_realloc(struct virtio_net *dev, int index)
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
struct zcopy_mbuf *new_zmbuf;
- struct vring_used_elem *new_shadow_used_ring;
+ struct vring_used_elem *new_shadow_used_split;
+ struct vring_used_elem_packed *new_shadow_used_packed;
struct batch_copy_elem *new_batch_copy_elems;
int ret;
@@ -361,13 +375,26 @@ numa_realloc(struct virtio_net *dev, int index)
vq->zmbufs = new_zmbuf;
}
- new_shadow_used_ring = rte_malloc_socket(NULL,
- vq->size * sizeof(struct vring_used_elem),
- RTE_CACHE_LINE_SIZE,
- newnode);
- if (new_shadow_used_ring) {
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = new_shadow_used_ring;
+ if (vq_is_packed(dev)) {
+ new_shadow_used_packed = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_packed) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = new_shadow_used_packed;
+ }
+ } else {
+ new_shadow_used_split = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_split) {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = new_shadow_used_split;
+ }
}
new_batch_copy_elems = rte_malloc_socket(NULL,
@@ -1062,8 +1089,13 @@ vhost_user_get_vring_base(struct virtio_net *dev,
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = NULL;
+ if (vq_is_packed(dev)) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = NULL;
+ } else {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = NULL;
+ }
rte_free(vq->batch_copy_elems);
vq->batch_copy_elems = NULL;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 35f8cf90a..9571d5cdc 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -20,6 +20,7 @@
#include "iotlb.h"
#include "vhost.h"
+#include "virtio-packed.h"
#define MAX_PKT_BURST 32
@@ -82,7 +83,7 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev,
uint16_t to, uint16_t from, uint16_t size)
{
rte_memcpy(&vq->used->ring[to],
- &vq->shadow_used_ring[from],
+ &vq->shadow_used_split[from],
size * sizeof(struct vring_used_elem));
vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[to]),
@@ -126,8 +127,71 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
{
uint16_t i = vq->shadow_used_idx++;
- vq->shadow_used_ring[i].id = desc_idx;
- vq->shadow_used_ring[i].len = len;
+ vq->shadow_used_split[i].id = desc_idx;
+ vq->shadow_used_split[i].len = len;
+}
+
+static __rte_always_inline void
+flush_shadow_used_ring_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq)
+{
+ int i;
+ uint16_t used_idx = vq->last_used_idx;
+
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ vq->desc_packed[used_idx].index = vq->shadow_used_packed[i].id;
+ vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
+
+ used_idx += vq->shadow_used_packed[i].count;
+ }
+
+ rte_smp_wmb();
+
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ uint16_t flags;
+
+ if (vq->shadow_used_packed[i].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+
+ vhost_log_cache_used_vring(dev, vq,
+ vq->last_used_idx *
+ sizeof(struct vring_desc_packed),
+ sizeof(struct vring_desc_packed));
+
+ vq->last_used_idx += vq->shadow_used_packed[i].count;
+ if (vq->last_used_idx >= vq->size) {
+ vq->used_wrap_counter ^= 1;
+ vq->last_used_idx -= vq->size;
+ }
+ }
+
+ rte_smp_wmb();
+ vq->shadow_used_idx = 0;
+ vhost_log_cache_sync(dev, vq);
+}
+
+static __rte_always_inline void
+update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+ uint16_t desc_idx, uint16_t len, uint16_t count)
+{
+ uint16_t i = vq->shadow_used_idx++;
+
+ vq->shadow_used_packed[i].id = desc_idx;
+ vq->shadow_used_packed[i].len = len;
+ vq->shadow_used_packed[i].count = count;
}
static inline void
--
2.14.4