From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, tiwei.bie@intel.com, david.marchand@redhat.com,
jfreimann@redhat.com, bruce.richardson@intel.com,
zhihong.wang@intel.com, konstantin.ananyev@intel.com,
mattias.ronnblom@ericsson.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v3 4/5] vhost: simplify descriptor's buffer prefetching
Date: Wed, 29 May 2019 15:04:19 +0200 [thread overview]
Message-ID: <20190529130420.6428-5-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20190529130420.6428-1-maxime.coquelin@redhat.com>
Now that we have a single function to map the descriptors'
buffers, let's prefetch them there, as it is the earliest
place we can do it.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
---
lib/librte_vhost/virtio_net.c | 32 ++------------------------------
1 file changed, 2 insertions(+), 30 deletions(-)
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 4564e9bcc9..8f0e784f77 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -286,6 +286,8 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(!desc_addr))
return -1;
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
buf_vec[vec_id].buf_iova = desc_iova;
buf_vec[vec_id].buf_addr = desc_addr;
buf_vec[vec_id].buf_len = desc_chunck_len;
@@ -666,9 +668,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- if (nr_vec > 1)
- rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
error = -1;
goto out;
@@ -711,10 +710,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- /* Prefetch next buffer address. */
- if (vec_idx + 1 < nr_vec)
- rte_prefetch0((void *)(uintptr_t)
- buf_vec[vec_idx + 1].buf_addr);
buf_offset = 0;
buf_avail = buf_len;
}
@@ -812,8 +807,6 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
break;
}
- rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
@@ -861,8 +854,6 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
break;
}
- rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
@@ -1118,9 +1109,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
goto out;
}
- if (likely(nr_vec > 1))
- rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
if (virtio_net_with_host_offload(dev)) {
if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
/*
@@ -1131,7 +1119,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
hdr = &tmp_hdr;
} else {
hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
- rte_prefetch0(hdr);
}
}
@@ -1161,9 +1148,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
}
- rte_prefetch0((void *)(uintptr_t)
- (buf_addr + buf_offset));
-
PRINT_PACKET(dev,
(uintptr_t)(buf_addr + buf_offset),
(uint32_t)buf_avail, 0);
@@ -1229,14 +1213,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
- /*
- * Prefecth desc n + 1 buffer while
- * desc n buffer is processed.
- */
- if (vec_idx + 1 < nr_vec)
- rte_prefetch0((void *)(uintptr_t)
- buf_vec[vec_idx + 1].buf_addr);
-
buf_offset = 0;
buf_avail = buf_len;
@@ -1380,8 +1356,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (likely(dev->dequeue_zero_copy == 0))
update_shadow_used_ring_split(vq, head_idx, 0);
- rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
RTE_LOG(ERR, VHOST_DATA,
@@ -1491,8 +1465,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
update_shadow_used_ring_packed(vq, buf_id, 0,
desc_count);
- rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
RTE_LOG(ERR, VHOST_DATA,
--
2.21.0
next prev parent reply other threads:[~2019-05-29 13:05 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-05-29 13:04 [dpdk-dev] [PATCH v3 0/5] vhost: I-cache pressure optimizations Maxime Coquelin
2019-05-29 13:04 ` [dpdk-dev] [PATCH v3 1/5] vhost: un-inline dirty pages logging functions Maxime Coquelin
2019-05-29 13:04 ` [dpdk-dev] [PATCH v3 2/5] vhost: do not inline packed and split functions Maxime Coquelin
2019-05-29 13:04 ` [dpdk-dev] [PATCH v3 3/5] vhost: do not inline unlikely fragmented buffers code Maxime Coquelin
2019-05-29 13:04 ` Maxime Coquelin [this message]
2019-05-29 13:04 ` [dpdk-dev] [PATCH v3 5/5] eal/x86: force inlining of all memcpy and mov helpers Maxime Coquelin
2019-06-05 12:53 ` Bruce Richardson
2019-06-06 9:33 ` Maxime Coquelin
2019-06-05 12:32 ` [dpdk-dev] [PATCH v3 0/5] vhost: I-cache pressure optimizations Maxime Coquelin
2019-06-05 12:52 ` Bruce Richardson
2019-06-05 13:00 ` Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190529130420.6428-5-maxime.coquelin@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=bruce.richardson@intel.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
--cc=jfreimann@redhat.com \
--cc=konstantin.ananyev@intel.com \
--cc=mattias.ronnblom@ericsson.com \
--cc=tiwei.bie@intel.com \
--cc=zhihong.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).