From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org,
maxime.coquelin@redhat.com, mst@redhat.com
Subject: [dpdk-dev] [PATCH 12/17] vhost: dequeue for packed queues
Date: Fri, 16 Mar 2018 16:21:15 +0100
Message-ID: <20180316152120.13199-13-jfreimann@redhat.com>
In-Reply-To: <20180316152120.13199-1-jfreimann@redhat.com>

Implement code to dequeue and process descriptors from
the vring if VIRTIO_F_PACKED is negotiated.

Check whether a descriptor was made available by the driver by
looking at the VRING_DESC_F_AVAIL flag in the descriptor. If so,
dequeue it and set the VRING_DESC_F_USED flag to the current
value of the used wrap counter.

The used wrap counter needs to be toggled whenever the last
descriptor of the ring is written out. This allows host and guest
to detect new descriptors even after the ring has wrapped.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
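Reviewer note: the helpers used below (desc_is_avail(),
_set_desc_used(), toggle_wrap_counter()) come from patch 11/17
("vhost: add helpers for packed virtqueues"). A minimal sketch of
their intended semantics, assuming the descriptor flag bits of the
virtio 1.1 draft spec (the exact code in patch 11 may differ):

    /* Descriptor flag bits per the virtio 1.1 draft (bits 7 and 15). */
    #define VRING_DESC_F_AVAIL (1 << 7)
    #define VRING_DESC_F_USED (1 << 15)

    static inline void
    toggle_wrap_counter(struct vhost_virtqueue *vq)
    {
        vq->used_wrap_counter ^= 1;
    }

    /* A descriptor is available when its AVAIL flag matches our
     * wrap counter while its USED flag does not. */
    static inline int
    desc_is_avail(struct vhost_virtqueue *vq,
                  struct vring_desc_packed *desc)
    {
        uint16_t flags = desc->flags;

        return vq->used_wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
               vq->used_wrap_counter != !!(flags & VRING_DESC_F_USED);
    }

    /* Mark a descriptor as used: AVAIL and USED are both set to
     * the wrap counter value, so AVAIL == USED signals "used" to
     * the driver. */
    static inline void
    _set_desc_used(struct vring_desc_packed *desc, int wrap_counter)
    {
        uint16_t flags = desc->flags;

        if (wrap_counter)
            flags |= VRING_DESC_F_USED | VRING_DESC_F_AVAIL;
        else
            flags &= ~(VRING_DESC_F_USED | VRING_DESC_F_AVAIL);

        desc->flags = flags;
    }
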
lib/librte_vhost/vhost.c | 1 +
lib/librte_vhost/vhost.h | 1 +
lib/librte_vhost/virtio_net.c | 228 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 230 insertions(+)
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index a300812..8cba10d 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -198,6 +198,7 @@ struct virtio_net *
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->used_wrap_counter = 1;
vhost_user_iotlb_init(dev, vring_idx);
/* Backends are set to -1 indicating an inactive device. */
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d35c4b1..f77fefe 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -108,6 +108,7 @@ struct vhost_virtqueue {
struct batch_copy_elem *batch_copy_elems;
uint16_t batch_copy_nb_elems;
+ uint32_t used_wrap_counter;
rte_rwlock_t iotlb_lock;
rte_rwlock_t iotlb_pending_lock;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 700aca7..8f59e4f 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -19,6 +19,7 @@
#include "iotlb.h"
#include "vhost.h"
+#include "virtio-1.1.h"
#define MAX_PKT_BURST 32
@@ -1118,6 +1119,233 @@
}
}
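+/* Process one descriptor chain starting at vq->last_used_idx: copy
+ * (or, with dequeue_zero_copy, reference) its payload into mbuf 'm'
+ * and mark the chain's head descriptor as used.
+ */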
+static inline int
+dequeue_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf *m,
+ struct vring_desc_packed *descs)
+{
+ struct vring_desc_packed *desc;
+ uint64_t desc_addr;
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct rte_mbuf *cur = m, *prev = m;
+ struct virtio_net_hdr *hdr = NULL;
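+ /* Record the head index and wrap counter up front: the head
+ * descriptor's used flag must only be flipped after the whole
+ * chain has been processed.
+ */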
+ uint16_t head_idx = vq->last_used_idx & (vq->size - 1);
+ int wrap_counter = vq->used_wrap_counter;
+ int rc = 0;
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ desc = &descs[vq->last_used_idx & (vq->size - 1)];
+ if (unlikely((desc->len < dev->vhost_hlen) ||
+ (desc->flags & VRING_DESC_F_INDIRECT))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "invalid descriptor: INDIRECT not supported yet\n");
+ rc = -1;
+ goto out;
+ }
+
+ desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+ sizeof(*desc), VHOST_ACCESS_RO);
+
+ if (unlikely(!desc_addr)) {
+ rc = -1;
+ goto out;
+ }
+
+ if (virtio_net_with_host_offload(dev)) {
+ hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+ rte_prefetch0(hdr);
+ }
+
+ /*
+ * A virtio driver normally uses at least 2 desc buffers
+ * for Tx: the first for storing the header, and others
+ * for storing the data.
+ */
+ if (likely((desc->len == dev->vhost_hlen) &&
+ (desc->flags & VRING_DESC_F_NEXT) != 0)) {
+ if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+ toggle_wrap_counter(vq);
+
+ desc = &descs[vq->last_used_idx & (vq->size - 1)];
+
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "INDIRECT not supported yet\n");
+ rc = -1;
+ goto out;
+ }
+
+ desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+ sizeof(*desc), VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ rc = -1;
+ goto out;
+ }
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ } else {
+ desc_avail = desc->len - dev->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
+ }
+
+ rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+
+ mbuf_offset = 0;
+ mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ while (1) {
+ uint64_t hpa;
+
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+
+ /*
+ * A desc buf might span two host physical pages that are
+ * not contiguous. In such a case (gpa_to_hpa returns 0), data
+ * will be copied even though zero copy is enabled.
+ */
+ if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
+ desc->addr + desc_offset, cpy_len)))) {
+ cur->data_len = cpy_len;
+ cur->data_off = 0;
+ cur->buf_addr = (void *)(uintptr_t)desc_addr;
+ cur->buf_physaddr = hpa;
+
+ /*
+ * In zero copy mode, one mbuf can only reference data
+ * for all or part of one desc buf.
+ */
+ mbuf_avail = cpy_len;
+ } else {
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(desc_addr + desc_offset)),
+ cpy_len);
+ }
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+
+ /* This desc buf reached its end, get the next one */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
+ if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+ toggle_wrap_counter(vq);
+
+ desc = &descs[vq->last_used_idx & (vq->size - 1)];
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "INDIRECT not supported yet\n");
+ rc = -1;
+ goto out;
+ }
+
+ desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+ sizeof(*desc), VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ rc = -1;
+ goto out;
+ }
+
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+ }
+
+ /*
+ * This mbuf is full, allocate a new one
+ * to hold more data.
+ */
+ if (mbuf_avail == 0) {
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA, "Failed to "
+ "allocate memory for mbuf.\n");
+ rc = -1;
+ goto out;
+ }
+
+ prev->next = cur;
+ prev->data_len = mbuf_offset;
+ m->nb_segs += 1;
+ m->pkt_len += mbuf_offset;
+ prev = cur;
+
+ mbuf_offset = 0;
+ mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+ }
+
+ if (hdr)
+ vhost_dequeue_offload(hdr, m);
+
+ if ((++vq->last_used_idx & (vq->size - 1)) == 0)
+ toggle_wrap_counter(vq);
+
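+ /* Make sure all payload stores complete before the driver can
+ * observe the head descriptor's used flag.
+ */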
+ rte_smp_wmb();
+ _set_desc_used(&descs[head_idx], wrap_counter);
+
+ prev->data_len = mbuf_offset;
+ m->pkt_len += mbuf_offset;
+
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return rc;
+}
+
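+/* Dequeue up to 'count' packets from a packed virtqueue, one
+ * descriptor chain per packet. Returns the number of packets
+ * dequeued; vq locking is handled inside dequeue_desc().
+ */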
+static inline uint16_t
+vhost_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+ uint16_t count)
+{
+ uint16_t i;
+ uint16_t idx;
+ struct vring_desc_packed *desc = vq->desc_packed;
+ int err;
+
+ count = RTE_MIN(MAX_PKT_BURST, count);
+ for (i = 0; i < count; i++) {
+ idx = vq->last_used_idx & (vq->size - 1);
+ if (!desc_is_avail(vq, &desc[idx]))
+ break;
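+ /* Only read the descriptor contents after its avail flag
+ * has been observed.
+ */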
+ rte_smp_rmb();
+
+ pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(pkts[i] == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ break;
+ }
+
+ err = dequeue_desc(dev, vq, mbuf_pool, pkts[i], desc);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts[i]);
+ break;
+ }
+ }
+
+ return i;
+}
+
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
--
1.8.3.1