From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com,
yuanx.wang@intel.com, wenwux.ma@intel.com,
bruce.richardson@intel.com, john.mcnamara@intel.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [RFC 09/14] vhost: remove notion of async descriptor
Date: Fri, 8 Oct 2021 00:00:08 +0200
Message-ID: <20211007220013.355530-10-maxime.coquelin@redhat.com>
In-Reply-To: <20211007220013.355530-1-maxime.coquelin@redhat.com>
Now that the IO vector iterators have been simplified, the
rte_vhost_async_desc struct only contains a pointer to the
iterator array stored in the async metadata.

This patch removes it and passes the iterator array pointer
directly to the transfer_data callback. Doing so avoids
declaring the descriptor array on the stack, and also avoids
the cost of filling it.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
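For reference, here is a minimal sketch of what a backend's
transfer_data callback looks like with the new prototype, iterating
over the iov_iter array directly. The field names (iov, nr_segs,
src_addr, dst_addr, len) are assumed from the iovec/iterator
structures introduced earlier in this series, and dma_copy() is only
a placeholder for the backend's actual DMA enqueue call, not a real
API:

static int32_t
my_transfer_data_cb(int vid, uint16_t queue_id,
		struct rte_vhost_iov_iter *iov_iter,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	uint16_t i_iter;
	unsigned long i_seg;

	/* A real backend would map vid/queue_id to a DMA channel. */
	(void)vid;
	(void)queue_id;

	/* The opaque_data path is not handled in this sketch. */
	if (opaque_data != NULL)
		return -1;

	for (i_iter = 0; i_iter < count; i_iter++) {
		struct rte_vhost_iov_iter *iter = &iov_iter[i_iter];

		/* Each iterator describes one packet as an array of
		 * address/length segments to copy.
		 */
		for (i_seg = 0; i_seg < iter->nr_segs; i_seg++)
			dma_copy(iter->iov[i_seg].src_addr,
					iter->iov[i_seg].dst_addr,
					iter->iov[i_seg].len);
	}

	/* Return the number of iterators actually submitted. */
	return i_iter;
}
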
examples/vhost/ioat.c | 10 +++++-----
examples/vhost/ioat.h | 2 +-
lib/vhost/rte_vhost_async.h | 16 ++++------------
lib/vhost/virtio_net.c | 19 ++-----------------
4 files changed, 12 insertions(+), 35 deletions(-)
diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index a8c588deff..9aeeb12fd9 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -124,10 +124,10 @@ open_ioat(const char *value)
int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data, uint16_t count)
{
- uint32_t i_desc;
+ uint32_t i_iter;
uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
struct rte_vhost_iov_iter *iter = NULL;
unsigned long i_seg;
@@ -135,8 +135,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
unsigned short write = cb_tracker[dev_id].next_write;
if (!opaque_data) {
- for (i_desc = 0; i_desc < count; i_desc++) {
- iter = descs[i_desc].iter;
+ for (i_iter = 0; i_iter < count; i_iter++) {
+ iter = iov_iter + i_iter;
i_seg = 0;
if (cb_tracker[dev_id].ioat_space < iter->nr_segs)
break;
@@ -161,7 +161,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
/* ring the doorbell */
rte_ioat_perform_ops(dev_id);
cb_tracker[dev_id].next_write = write;
- return i_desc;
+ return i_iter;
}
int32_t
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 62e163c585..a4f09ee39b 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -29,7 +29,7 @@ int open_ioat(const char *value);
int32_t
ioat_transfer_data_cb(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data, uint16_t count);
int32_t
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 4ea5cfab10..a87ea6ba37 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -26,14 +26,6 @@ struct rte_vhost_iov_iter {
unsigned long nr_segs;
};
-/**
- * dma transfer descriptor
- */
-struct rte_vhost_async_desc {
- /* memory iov_iter */
- struct rte_vhost_iov_iter *iter;
-};
-
/**
* dma transfer status
*/
@@ -55,17 +47,17 @@ struct rte_vhost_async_channel_ops {
* id of vhost device to perform data copies
* @param queue_id
* queue id to perform data copies
- * @param descs
- * an array of DMA transfer memory descriptors
+ * @param iov_iter
+ * an array of IOV iterators
* @param opaque_data
* opaque data pair sending to DMA engine
* @param count
* number of elements in the "descs" array
* @return
- * number of descs processed, negative value means error
+ * number of IOV iterators processed, negative value means error
*/
int32_t (*transfer_data)(int vid, uint16_t queue_id,
- struct rte_vhost_async_desc *descs,
+ struct rte_vhost_iov_iter *iov_iter,
struct rte_vhost_async_status *opaque_data,
uint16_t count);
/**
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 5ce4c14a73..b295dc1d39 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -994,15 +994,6 @@ async_iter_reset(struct vhost_async *async)
async->iovec_idx = 0;
}
-static __rte_always_inline void
-async_fill_descs(struct vhost_async *async, struct rte_vhost_async_desc *descs)
-{
- int i;
-
- for (i = 0; i < async->iter_idx; i++)
- descs[i].iter = async->iov_iter + i;
-}
-
static __rte_always_inline int
async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
@@ -1549,7 +1540,6 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
uint16_t avail_head;
struct vhost_async *async = vq->async;
- struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t pkt_err = 0;
int32_t n_xfer;
@@ -1594,9 +1584,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
if (unlikely(pkt_idx == 0))
return 0;
- async_fill_descs(async, async_descs);
-
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
dev->vid, __func__, queue_id);
@@ -1811,7 +1799,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
uint16_t num_descs;
struct vhost_async *async = vq->async;
- struct rte_vhost_async_desc async_descs[MAX_PKT_BURST];
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t pkt_err = 0;
uint16_t slot_idx = 0;
@@ -1839,9 +1826,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
if (unlikely(pkt_idx == 0))
return 0;
- async_fill_descs(async, async_descs);
-
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx);
+ n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
if (unlikely(n_xfer < 0)) {
VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
dev->vid, __func__, queue_id);
--
2.31.1