From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org, Huawei Xie <huawei.xie@intel.com>,
Yuanhan Liu <yuanhan.liu@linux.intel.com>
Cc: Dyasly Sergey <s.dyasly@samsung.com>,
Ilya Maximets <i.maximets@samsung.com>
Subject: [dpdk-dev] [PATCH RFC v3 2/3] vhost: make buf vector for scatter RX local.
Date: Wed, 24 Feb 2016 14:47:17 +0300 [thread overview]
Message-ID: <1456314438-4021-3-git-send-email-i.maximets@samsung.com> (raw)
In-Reply-To: <1456314438-4021-1-git-send-email-i.maximets@samsung.com>
Array of buf_vector's is just an array for temporarily storing information
about available descriptors. It is used only locally in virtio_dev_merge_rx()
and there is no reason for that array to be shared.
Fix that by allocating local buf_vec inside virtio_dev_merge_rx().
The buf_vec field of struct vhost_virtqueue is marked as deprecated.
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
doc/guides/rel_notes/deprecation.rst | 1 +
lib/librte_vhost/rte_virtio_net.h | 2 +-
lib/librte_vhost/vhost_rxtx.c | 49 ++++++++++++++++++------------------
3 files changed, 27 insertions(+), 25 deletions(-)
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index e94d4a2..40f350d 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -7,6 +7,7 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
+* Field buf_vec of struct vhost_virtqueue have been deprecated.
* The following fields have been deprecated in rte_eth_stats:
ibadcrc, ibadlen, imcasts, fdirmatch, fdirmiss,
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 4a2303a..e6e5cf3 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -93,7 +93,7 @@ struct vhost_virtqueue {
int enabled;
uint64_t log_guest_addr; /**< Physical address of used ring, for logging */
uint64_t reserved[15]; /**< Reserve some spaces for future extension. */
- struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
+ struct buf_vector buf_vec[BUF_VECTOR_MAX] __rte_deprecated; /**< @deprecated Buffer for scatter RX. */
} __rte_cache_aligned;
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 14c2159..a8e2582 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -340,7 +340,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
static inline uint32_t __attribute__((always_inline))
copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
uint16_t res_base_idx, uint16_t res_end_idx,
- struct rte_mbuf *pkt)
+ struct rte_mbuf *pkt, struct buf_vector *buf_vec)
{
uint32_t vec_idx = 0;
uint32_t entry_success = 0;
@@ -371,7 +371,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
*/
vq = dev->virtqueue[queue_id];
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
vb_hdr_addr = vb_addr;
/* Prefetch buffer address. */
@@ -386,24 +386,24 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
rte_memcpy((void *)(uintptr_t)vb_hdr_addr,
(const void *)&virtio_hdr, vq->vhost_hlen);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr, vq->vhost_hlen);
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr, vq->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)vb_hdr_addr, vq->vhost_hlen, 1);
seg_avail = rte_pktmbuf_data_len(pkt);
vb_offset = vq->vhost_hlen;
- vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+ vb_avail = buf_vec[vec_idx].buf_len - vq->vhost_hlen;
entry_len = vq->vhost_hlen;
if (vb_avail == 0) {
- uint32_t desc_idx = vq->buf_vec[vec_idx].desc_idx;
+ uint32_t desc_idx = buf_vec[vec_idx].desc_idx;
if ((vq->desc[desc_idx].flags & VRING_DESC_F_NEXT) == 0) {
idx = cur_idx & (vq->size - 1);
/* Update used ring with desc information */
- vq->used->ring[idx].id = vq->buf_vec[vec_idx].desc_idx;
+ vq->used->ring[idx].id = buf_vec[vec_idx].desc_idx;
vq->used->ring[idx].len = entry_len;
vhost_log_used_vring(dev, vq,
@@ -416,12 +416,12 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
}
vec_idx++;
- vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
/* Prefetch buffer address. */
rte_prefetch0((void *)(uintptr_t)vb_addr);
vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
+ vb_avail = buf_vec[vec_idx].buf_len;
}
cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -431,7 +431,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
rte_memcpy((void *)(uintptr_t)(vb_addr + vb_offset),
rte_pktmbuf_mtod_offset(pkt, const void *, seg_offset),
cpy_len);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr + vb_offset,
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr + vb_offset,
cpy_len);
PRINT_PACKET(dev,
@@ -450,12 +450,12 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
* entry reach to its end.
* But the segment doesn't complete.
*/
- if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+ if ((vq->desc[buf_vec[vec_idx].desc_idx].flags &
VRING_DESC_F_NEXT) == 0) {
/* Update used ring with desc information */
idx = cur_idx & (vq->size - 1);
vq->used->ring[idx].id
- = vq->buf_vec[vec_idx].desc_idx;
+ = buf_vec[vec_idx].desc_idx;
vq->used->ring[idx].len = entry_len;
vhost_log_used_vring(dev, vq,
offsetof(struct vring_used, ring[idx]),
@@ -467,9 +467,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
vec_idx++;
vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
+ buf_vec[vec_idx].buf_addr);
vb_offset = 0;
- vb_avail = vq->buf_vec[vec_idx].buf_len;
+ vb_avail = buf_vec[vec_idx].buf_len;
cpy_len = RTE_MIN(vb_avail, seg_avail);
} else {
/*
@@ -488,7 +488,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
* from buf_vec.
*/
uint32_t desc_idx =
- vq->buf_vec[vec_idx].desc_idx;
+ buf_vec[vec_idx].desc_idx;
if ((vq->desc[desc_idx].flags &
VRING_DESC_F_NEXT) == 0) {
@@ -512,9 +512,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
/* Get next buffer from buf_vec. */
vec_idx++;
vb_addr = gpa_to_vva(dev,
- vq->buf_vec[vec_idx].buf_addr);
+ buf_vec[vec_idx].buf_addr);
vb_avail =
- vq->buf_vec[vec_idx].buf_len;
+ buf_vec[vec_idx].buf_len;
vb_offset = 0;
}
@@ -528,7 +528,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
/* Update used ring with desc information */
idx = cur_idx & (vq->size - 1);
vq->used->ring[idx].id
- = vq->buf_vec[vec_idx].desc_idx;
+ = buf_vec[vec_idx].desc_idx;
vq->used->ring[idx].len = entry_len;
vhost_log_used_vring(dev, vq,
offsetof(struct vring_used, ring[idx]),
@@ -544,7 +544,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
static inline void __attribute__((always_inline))
update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
- uint32_t *secure_len, uint32_t *vec_idx)
+ uint32_t *secure_len, uint32_t *vec_idx, struct buf_vector *buf_vec)
{
uint16_t wrapped_idx = id & (vq->size - 1);
uint32_t idx = vq->avail->ring[wrapped_idx];
@@ -555,9 +555,9 @@ update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
do {
next_desc = 0;
len += vq->desc[idx].len;
- vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
- vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
- vq->buf_vec[vec_id].desc_idx = idx;
+ buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+ buf_vec[vec_id].buf_len = vq->desc[idx].len;
+ buf_vec[vec_id].desc_idx = idx;
vec_id++;
if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
@@ -582,6 +582,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
uint16_t avail_idx;
uint16_t res_base_idx, res_cur_idx;
uint8_t success = 0;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
dev->device_fh);
@@ -620,8 +621,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(res_cur_idx == avail_idx))
goto merge_rx_exit;
- update_secure_len(vq, res_cur_idx,
- &secure_len, &vec_idx);
+ update_secure_len(vq, res_cur_idx, &secure_len,
+ &vec_idx, buf_vec);
res_cur_idx++;
} while (pkt_len > secure_len);
@@ -632,7 +633,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
} while (success == 0);
entry_success = copy_from_mbuf_to_vring(dev, queue_id,
- res_base_idx, res_cur_idx, pkts[pkt_idx]);
+ res_base_idx, res_cur_idx, pkts[pkt_idx], buf_vec);
rte_smp_wmb();
--
2.5.0
next prev parent reply other threads:[~2016-02-24 11:47 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-02-24 11:47 [dpdk-dev] [PATCH RFC v3 0/3] Thread safe rte_vhost_enqueue_burst() Ilya Maximets
2016-02-24 11:47 ` [dpdk-dev] [PATCH RFC v3 1/3] vhost: use SMP barriers instead of compiler ones Ilya Maximets
2016-03-18 10:08 ` Xie, Huawei
2016-03-18 10:23 ` Ilya Maximets
2016-03-18 10:27 ` Xie, Huawei
2016-03-18 10:39 ` Ilya Maximets
2016-03-18 10:47 ` Xie, Huawei
2016-03-18 11:00 ` Ilya Maximets
[not found] ` <C37D651A908B024F974696C65296B57B4C67825C@SHSMSX101.ccr.corp.intel.com>
[not found] ` <56EBE9AE.9070400@samsung.com>
[not found] ` <56EBF256.8040409@samsung.com>
2016-03-18 12:28 ` Ilya Maximets
2016-03-18 12:23 ` [dpdk-dev] [PATCH v4] " Ilya Maximets
2016-03-18 12:41 ` Yuanhan Liu
2016-03-21 4:49 ` Ilya Maximets
2016-03-21 14:07 ` Ananyev, Konstantin
2016-03-21 17:25 ` Xie, Huawei
2016-03-21 17:36 ` Ananyev, Konstantin
2016-03-23 14:07 ` Xie, Huawei
2016-03-31 13:46 ` Thomas Monjalon
2016-02-24 11:47 ` Ilya Maximets [this message]
2016-02-24 11:47 ` [dpdk-dev] [PATCH RFC v3 3/3] vhost: avoid reordering of used->idx and last_used_idx updating Ilya Maximets
2016-03-17 15:29 ` [dpdk-dev] [PATCH RFC v3 0/3] Thread safe rte_vhost_enqueue_burst() Thomas Monjalon
2016-03-18 8:00 ` Yuanhan Liu
2016-03-18 8:09 ` Thomas Monjalon
2016-03-18 9:16 ` Yuanhan Liu
2016-03-18 9:34 ` Thomas Monjalon
2016-03-18 9:46 ` Yuanhan Liu
2016-03-18 9:55 ` Ilya Maximets
2016-03-18 10:10 ` Xie, Huawei
2016-03-18 10:24 ` Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1456314438-4021-3-git-send-email-i.maximets@samsung.com \
--to=i.maximets@samsung.com \
--cc=dev@dpdk.org \
--cc=huawei.xie@intel.com \
--cc=s.dyasly@samsung.com \
--cc=yuanhan.liu@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).