From: Huawei Xie <huawei.xie@intel.com>
To: dev@dpdk.org
Cc: ann.zhuangyanying@huawei.com
Subject: [dpdk-dev] [PATCH] vhost: remove lockless enqueue to the virtio ring
Date: Mon, 4 Jan 2016 22:46:27 +0800 [thread overview]
Message-ID: <1451918787-85887-1-git-send-email-huawei.xie@intel.com> (raw)
This patch removes the internal lockless enqueue implementation.
DPDK doesn't support receiving/transmitting packets from/to the same
queue. Vhost PMD wraps vhost device as normal DPDK port. DPDK
applications normally have their own lock implementation when enqueuing
packets to the same queue of a port.
The atomic cmpset is a costly operation. This patch should help
performance a bit.
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_vhost/vhost_rxtx.c | 86 +++++++++++++------------------------------
1 file changed, 25 insertions(+), 61 deletions(-)
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index bbf3fac..26a1b9c 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -69,10 +69,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
uint64_t buff_hdr_addr = 0;
uint32_t head[MAX_PKT_BURST];
uint32_t head_idx, packet_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
+ uint16_t avail_idx, res_cur_idx, res_end_idx;
uint16_t free_entries;
- uint8_t success = 0;
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
@@ -88,29 +86,18 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
- /*
- * As many data cores may want access to available buffers,
- * they need to be reserved.
- */
- do {
- res_base_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = (avail_idx - res_base_idx);
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (count == 0)
- return 0;
-
- res_end_idx = res_base_idx + count;
- /* vq->last_used_idx_res is atomically updated. */
- /* TODO: Allow to disable cmpset if no concurrency in application. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx, res_end_idx);
- } while (unlikely(success == 0));
- res_cur_idx = res_base_idx;
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ free_entries = (avail_idx - vq->last_used_idx_res);
+ /*check that we have enough buffers*/
+ if (unlikely(count > free_entries))
+ count = free_entries;
+ if (count == 0)
+ return 0;
+
+ res_cur_idx = vq->last_used_idx_res;
+ res_end_idx = res_cur_idx + count;
+ vq->last_used_idx_res = res_end_idx;
+
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
dev->device_fh, res_cur_idx, res_end_idx);
@@ -230,10 +217,6 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
rte_compiler_barrier();
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
*(volatile uint16_t *)&vq->used->idx += count;
vq->last_used_idx = res_end_idx;
@@ -474,7 +457,6 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
uint32_t pkt_idx = 0, entry_success = 0;
uint16_t avail_idx;
uint16_t res_base_idx, res_cur_idx;
- uint8_t success = 0;
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
dev->device_fh);
@@ -496,46 +478,28 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
+ uint32_t secure_len = 0;
+ uint32_t vec_idx = 0;
- do {
- /*
- * As many data cores may want access to available
- * buffers, they need to be reserved.
- */
- uint32_t secure_len = 0;
- uint32_t vec_idx = 0;
-
- res_base_idx = vq->last_used_idx_res;
- res_cur_idx = res_base_idx;
+ res_base_idx = res_cur_idx = vq->last_used_idx_res;
- do {
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- if (unlikely(res_cur_idx == avail_idx))
- goto merge_rx_exit;
+ do {
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ if (unlikely(res_cur_idx == avail_idx))
+ goto merge_rx_exit;
- update_secure_len(vq, res_cur_idx,
- &secure_len, &vec_idx);
- res_cur_idx++;
- } while (pkt_len > secure_len);
+ update_secure_len(vq, res_cur_idx,
+ &secure_len, &vec_idx);
+ res_cur_idx++;
+ } while (pkt_len > secure_len);
- /* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_base_idx,
- res_cur_idx);
- } while (success == 0);
+ vq->last_used_idx_res = res_cur_idx;
entry_success = copy_from_mbuf_to_vring(dev, queue_id,
res_base_idx, res_cur_idx, pkts[pkt_idx]);
rte_compiler_barrier();
- /*
- * Wait until it's our turn to add our buffer
- * to the used ring.
- */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
*(volatile uint16_t *)&vq->used->idx += entry_success;
vq->last_used_idx = res_cur_idx;
}
--
1.8.1.4
next reply other threads:[~2016-01-05 6:41 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-04 14:46 Huawei Xie [this message]
2016-01-05 7:16 ` Xie, Huawei
2016-03-14 23:13 ` Thomas Monjalon
2016-03-16 8:20 ` Xie, Huawei
2016-03-16 8:30 ` Yuanhan Liu
2016-01-19 16:25 ` Tan, Jianfeng
2016-01-19 16:43 ` Xie, Huawei
2016-01-19 18:33 ` Polehn, Mike A
2016-01-20 3:39 ` Xie, Huawei
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1451918787-85887-1-git-send-email-huawei.xie@intel.com \
--to=huawei.xie@intel.com \
--cc=ann.zhuangyanying@huawei.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).