From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org, Huawei Xie <huawei.xie@intel.com>,
Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: Ilya Maximets <i.maximets@samsung.com>,
Dyasly Sergey <s.dyasly@samsung.com>
Subject: [dpdk-dev] [PATCH RFC 2/4] vhost: make buf vector for scatter RX local.
Date: Fri, 19 Feb 2016 09:32:41 +0300
Message-ID: <1455863563-15751-3-git-send-email-i.maximets@samsung.com>
In-Reply-To: <1455863563-15751-1-git-send-email-i.maximets@samsung.com>

The array of buf_vector's is just temporary storage for information about
available descriptors. It is used only locally in virtio_dev_merge_rx(),
so there is no reason for that array to be shared through struct vhost_virtqueue.

Fix that by allocating a local buf_vec inside virtio_dev_merge_rx().

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
lib/librte_vhost/rte_virtio_net.h | 1 -
lib/librte_vhost/vhost_rxtx.c | 45 ++++++++++++++++++++-------------------
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 10dcb90..ae1e4fb 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -91,7 +91,6 @@ struct vhost_virtqueue {
int kickfd; /**< Currently unused as polling mode is enabled. */
int enabled;
uint64_t reserved[16]; /**< Reserve some spaces for future extension. */
- struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
} __rte_cache_aligned;
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 411dd95..9095fb1 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -295,7 +295,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
static inline uint32_t __attribute__((always_inline))
copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
uint16_t res_base_idx, uint16_t res_end_idx,
- struct rte_mbuf *pkt)
+ struct rte_mbuf *pkt, struct buf_vector *buf_vec)
{
uint32_t vec_idx = 0;
uint32_t entry_success = 0;
@@ -325,7 +325,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
*/
vq = dev->virtqueue[queue_id];
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
vb_hdr_addr = vb_addr;
/* Prefetch buffer address. */
@@ -345,19 +345,19 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
seg_avail = rte_pktmbuf_data_len(pkt);
vb_offset = vq->vhost_hlen;
- vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+ vb_avail = buf_vec[vec_idx].buf_len - vq->vhost_hlen;
entry_len = vq->vhost_hlen;
if (vb_avail == 0) {
uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
+ buf_vec[vec_idx].desc_idx;
if ((vq->desc[desc_idx].flags
& VRING_DESC_F_NEXT) == 0) {
/* Update used ring with desc information */
vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
+ = buf_vec[vec_idx].desc_idx;
vq->used->ring[cur_idx & (vq->size - 1)].len
= entry_len;
@@ -367,12 +367,12 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
}
vec_idx++;
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
/* Prefetch buffer address. */
rte_prefetch0((void *)(uintptr_t)vb_addr);
vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
+ vb_avail = buf_vec[vec_idx].buf_len;
}
cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -399,11 +399,11 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
* entry reach to its end.
* But the segment doesn't complete.
*/
- if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+ if ((vq->desc[buf_vec[vec_idx].desc_idx].flags &
VRING_DESC_F_NEXT) == 0) {
/* Update used ring with desc information */
vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
+ = buf_vec[vec_idx].desc_idx;
vq->used->ring[cur_idx & (vq->size - 1)].len
= entry_len;
entry_len = 0;
@@ -413,9 +413,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
vec_idx++;
vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
+ buf_vec[vec_idx].buf_addr);
vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
+ vb_avail = buf_vec[vec_idx].buf_len;
cpy_len = RTE_MIN(vb_avail, seg_avail);
} else {
/*
@@ -434,7 +434,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
* from buf_vec.
*/
uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
+ buf_vec[vec_idx].desc_idx;
if ((vq->desc[desc_idx].flags &
VRING_DESC_F_NEXT) == 0) {
@@ -456,9 +456,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
/* Get next buffer from buf_vec. */
vec_idx++;
vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
+ buf_vec[vec_idx].buf_addr);
vb_avail =
- vq->buf_vec[vec_idx].buf_len;
+ buf_vec[vec_idx].buf_len;
vb_offset = 0;
}
@@ -471,7 +471,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
*/
/* Update used ring with desc information */
vq->used->ring[cur_idx & (vq->size - 1)].id
- = vq->buf_vec[vec_idx].desc_idx;
+ = buf_vec[vec_idx].desc_idx;
vq->used->ring[cur_idx & (vq->size - 1)].len
= entry_len;
entry_success++;
@@ -485,7 +485,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
static inline void __attribute__((always_inline))
update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
- uint32_t *secure_len, uint32_t *vec_idx)
+ uint32_t *secure_len, uint32_t *vec_idx, struct buf_vector *buf_vec)
{
uint16_t wrapped_idx = id & (vq->size - 1);
uint32_t idx = vq->avail->ring[wrapped_idx];
@@ -496,9 +496,9 @@ update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
do {
next_desc = 0;
len += vq->desc[idx].len;
- vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
- vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
- vq->buf_vec[vec_id].desc_idx = idx;
+ buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+ buf_vec[vec_id].buf_len = vq->desc[idx].len;
+ buf_vec[vec_id].desc_idx = idx;
vec_id++;
if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
@@ -523,6 +523,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
uint16_t avail_idx;
uint16_t res_base_idx, res_cur_idx;
uint8_t success = 0;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
dev->device_fh);
@@ -561,8 +562,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(res_cur_idx == avail_idx))
goto merge_rx_exit;
- update_secure_len(vq, res_cur_idx,
- &secure_len, &vec_idx);
+ update_secure_len(vq, res_cur_idx, &secure_len,
+ &vec_idx, buf_vec);
res_cur_idx++;
} while (pkt_len > secure_len);
@@ -573,7 +574,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
} while (success == 0);
entry_success = copy_from_mbuf_to_vring(dev, queue_id,
- res_base_idx, res_cur_idx, pkts[pkt_idx]);
+ res_base_idx, res_cur_idx, pkts[pkt_idx], buf_vec);
rte_smp_wmb();
--
2.5.0
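
A minimal, self-contained sketch of the pattern the commit message describes:
the scatter-RX buf_vec array moves out of the shared per-virtqueue structure
and onto the stack of the function that fills and consumes it, and is handed
to helpers as an explicit parameter. The struct layout, the BUF_VECTOR_MAX
value and the helper function below are simplified stand-ins for illustration
only, not the actual librte_vhost code.

#include <stdint.h>

#define BUF_VECTOR_MAX 256		/* illustrative value */

/* Simplified stand-in for librte_vhost's buf_vector: one guest buffer. */
struct buf_vector {
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * Before the patch the array lived in the shared per-virtqueue state
 * (struct vhost_virtqueue { ... struct buf_vector buf_vec[BUF_VECTOR_MAX]; }),
 * so every pass through the merge-RX path wrote to the same storage.
 * After the patch it is a local of the RX function and is passed down
 * explicitly, so nothing outside a single call ever sees it.
 */
static uint32_t
fill_buf_vec(struct buf_vector *buf_vec, uint32_t vec_idx,
	     uint64_t addr, uint32_t len, uint32_t desc_idx)
{
	buf_vec[vec_idx].buf_addr = addr;
	buf_vec[vec_idx].buf_len  = len;
	buf_vec[vec_idx].desc_idx = desc_idx;
	return vec_idx + 1;
}

int
main(void)
{
	/* Temporary, per-call storage: lives only for this invocation. */
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint32_t vec_idx = 0;

	vec_idx = fill_buf_vec(buf_vec, vec_idx, 0x1000, 2048, 7);
	return (int)vec_idx - 1;	/* 0 on success */
}

Because buf_vec is written and read entirely within one call, keeping it on
the stack also removes one piece of shared state, in line with the series'
goal of a thread safe rte_vhost_enqueue_burst().
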
Thread overview: 21+ messages
2016-02-19 6:32 [dpdk-dev] [PATCH RFC 0/4] Thread safe rte_vhost_enqueue_burst() Ilya Maximets
2016-02-19 6:32 ` [dpdk-dev] [PATCH RFC 1/4] vhost: use SMP barriers instead of compiler ones Ilya Maximets
2016-02-19 6:32 ` Ilya Maximets [this message]
2016-02-19 7:06 ` [dpdk-dev] [PATCH RFC 2/4] vhost: make buf vector for scatter RX local Yuanhan Liu
2016-02-19 7:30 ` Ilya Maximets
2016-02-19 8:10 ` Xie, Huawei
2016-04-05 5:47 ` [dpdk-dev] [RFC] vhost-user public struct refactor (was Re: [PATCH RFC 2/4] vhost: make buf vector for scatter RX local) Yuanhan Liu
2016-04-05 8:37 ` Thomas Monjalon
2016-04-05 14:06 ` Yuanhan Liu
2016-04-06 4:14 ` Flavio Leitner
2016-04-06 4:54 ` Yuanhan Liu
2016-02-19 6:32 ` [dpdk-dev] [PATCH RFC 3/4] vhost: avoid reordering of used->idx and last_used_idx updating Ilya Maximets
2016-02-19 6:32 ` [dpdk-dev] [PATCH RFC 4/4] doc: add note about rte_vhost_enqueue_burst thread safety Ilya Maximets
2016-02-19 7:10 ` Yuanhan Liu
2016-02-19 8:36 ` Xie, Huawei
2016-02-19 9:05 ` Ilya Maximets
2016-02-22 2:07 ` Xie, Huawei
2016-02-22 10:14 ` Thomas Monjalon
2016-02-23 5:56 ` Xie, Huawei
2016-02-24 5:06 ` Ilya Maximets
2016-02-25 5:12 ` Xie, Huawei