DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: yuanhan.liu@linux.intel.com, dev@dpdk.org
Cc: mst@redhat.com, jianfeng.tan@intel.com, olivier.matz@6wind.com,
	stephen@networkplumber.org,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v3] vhost: Only access header if offloading is supported in dequeue path
Date: Fri, 14 Oct 2016 10:07:07 +0200	[thread overview]
Message-ID: <1476432427-2724-1-git-send-email-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <1475773241-5714-1-git-send-email-maxime.coquelin@redhat.com>

If offloading features are not negotiated, parsing the virtio header
is not needed.

Micro-benchmark with testpmd shows that the gain is +4% with indirect
descriptors, +1% when using direct descriptors.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
Changes since v2:
=================
 - Simplify code by translating the first desc address
   unconditionally (Yuanhan)
 - Instead of checking features again, check whether
   hdr has been assigned to decide whether to call the offload function.

Changes since v1:
=================
 - Rebased
 - Fix early-out check in vhost_dequeue_offload

 lib/librte_vhost/virtio_net.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 812e5d3..15ef0b0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -555,6 +555,18 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 		return virtio_dev_rx(dev, queue_id, pkts, count);
 }
 
+static inline bool
+virtio_net_with_host_offload(struct virtio_net *dev)
+{
+	if (dev->features &
+			(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
+			 VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
+			 VIRTIO_NET_F_HOST_UFO))
+		return true;
+
+	return false;
+}
+
 static void
 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 {
@@ -607,6 +619,9 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 	void *l4_hdr = NULL;
 	struct tcp_hdr *tcp_hdr = NULL;
 
+	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+		return;
+
 	parse_ethernet(m, &l4_proto, &l4_hdr);
 	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
@@ -702,7 +717,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
 	struct rte_mbuf *cur = m, *prev = m;
-	struct virtio_net_hdr *hdr;
+	struct virtio_net_hdr *hdr = NULL;
 	/* A counter to avoid desc dead loop chain */
 	uint32_t nr_desc = 1;
 
@@ -715,8 +730,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	if (unlikely(!desc_addr))
 		return -1;
 
-	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
-	rte_prefetch0(hdr);
+	if (virtio_net_with_host_offload(dev)) {
+		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+		rte_prefetch0(hdr);
+	}
 
 	/*
 	 * A virtio driver normally uses at least 2 desc buffers
@@ -733,18 +750,18 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		if (unlikely(!desc_addr))
 			return -1;
 
-		rte_prefetch0((void *)(uintptr_t)desc_addr);
-
 		desc_offset = 0;
 		desc_avail  = desc->len;
 		nr_desc    += 1;
-
-		PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
 	} else {
 		desc_avail  = desc->len - dev->vhost_hlen;
 		desc_offset = dev->vhost_hlen;
 	}
 
+	rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+
+	PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+
 	mbuf_offset = 0;
 	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
 	while (1) {
@@ -831,7 +848,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	prev->data_len = mbuf_offset;
 	m->pkt_len    += mbuf_offset;
 
-	if (hdr->flags != 0 || hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE)
+	if (hdr)
 		vhost_dequeue_offload(hdr, m);
 
 	return 0;
-- 
2.7.4
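
A note on the virtio_net_with_host_offload() check added above: the VIRTIO_NET_F_* macros from linux/virtio_net.h are feature bit positions (VIRTIO_NET_F_CSUM is 0, VIRTIO_NET_F_HOST_TSO4 is 11, and so on), while dev->features holds a bitmap of negotiated features, which the library elsewhere tests with shifted masks such as (1ULL << VIRTIO_NET_F_MRG_RXBUF). The standalone sketch below shows the mask-based form of the same test; struct virtio_net_stub, stub_with_host_offload() and the main() driver are illustrative stand-ins, not part of the patch or of DPDK.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/virtio_net.h>	/* VIRTIO_NET_F_* feature bit numbers */

/* Minimal stand-in for DPDK's struct virtio_net: only the negotiated
 * feature bitmap is needed for this check. */
struct virtio_net_stub {
	uint64_t features;
};

/* True if any host offload feature (checksum, TSO4/6, ECN, UFO) has been
 * negotiated, testing shifted bit masks against the feature bitmap. */
static inline bool
stub_with_host_offload(const struct virtio_net_stub *dev)
{
	const uint64_t offload_mask =
		(1ULL << VIRTIO_NET_F_CSUM)      |
		(1ULL << VIRTIO_NET_F_HOST_ECN)  |
		(1ULL << VIRTIO_NET_F_HOST_TSO4) |
		(1ULL << VIRTIO_NET_F_HOST_TSO6) |
		(1ULL << VIRTIO_NET_F_HOST_UFO);

	return (dev->features & offload_mask) != 0;
}

int
main(void)
{
	/* Pretend the guest negotiated TSO4 only. */
	struct virtio_net_stub dev = {
		.features = 1ULL << VIRTIO_NET_F_HOST_TSO4
	};

	printf("host offload negotiated: %s\n",
	       stub_with_host_offload(&dev) ? "yes" : "no");
	return 0;
}

Compiled on a Linux host with the kernel uapi headers installed, this prints "yes"; with features cleared it prints "no", which is the case where the dequeue path can skip reading the virtio header entirely.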

Thread overview: 7+ messages
2016-10-06 17:00 [dpdk-dev] [PATCH] " Maxime Coquelin
2016-10-06 17:06 ` Maxime Coquelin
2016-10-11  7:45 ` [dpdk-dev] [PATCH v2] " Maxime Coquelin
2016-10-11  9:01   ` Yuanhan Liu
2016-10-14  7:24     ` Maxime Coquelin
2016-10-14  8:07 ` Maxime Coquelin [this message]
2016-10-18 14:30   ` [dpdk-dev] [PATCH v3] " Yuanhan Liu
