From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id A7846A0C43; Fri, 8 Oct 2021 00:13:39 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2A123413DD; Fri, 8 Oct 2021 00:13:32 +0200 (CEST) Received: from us-smtp-delivery-124.mimecast.com (us-smtp-delivery-124.mimecast.com [170.10.133.124]) by mails.dpdk.org (Postfix) with ESMTP id 75F8A4124B for ; Fri, 8 Oct 2021 00:13:30 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=redhat.com; s=mimecast20190719; t=1633644810; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=V+zZLpnIGikbp5/hW2+ymT7rklSfYR4P0DEz+0tHcHA=; b=Dfvq0T6eXnxUJndkIeKrnq3geKCNWxh8Z9oV9hGzi6SUAkp8RsoL0z6ABAwSofE47PBFPw wx9Wn9Xx19c2NizNj2RYEP6+26fo4swQUH7HXuHADzPl46n7BI90R4/BnZRvhgLkb+2wTL yd/m0MceKwNHNBvZ1eCtKixgOmR+sP4= Received: from mimecast-mx01.redhat.com (mimecast-mx01.redhat.com [209.132.183.4]) (Using TLS) by relay.mimecast.com with ESMTP id us-mta-558-ED6gavXoMVycESD1ZbGzKQ-1; Thu, 07 Oct 2021 18:13:27 -0400 X-MC-Unique: ED6gavXoMVycESD1ZbGzKQ-1 Received: from smtp.corp.redhat.com (int-mx06.intmail.prod.int.phx2.redhat.com [10.5.11.16]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mimecast-mx01.redhat.com (Postfix) with ESMTPS id 6D6A48E71E5; Thu, 7 Oct 2021 22:00:45 +0000 (UTC) Received: from max-t490s.redhat.com (unknown [10.39.208.18]) by smtp.corp.redhat.com (Postfix) with ESMTP id 954285C1D0; Thu, 7 Oct 2021 22:00:43 +0000 (UTC) From: Maxime Coquelin To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com, yuanx.wang@intel.com, wenwux.ma@intel.com, bruce.richardson@intel.com, john.mcnamara@intel.com Cc: Maxime Coquelin 
Date: Fri, 8 Oct 2021 00:00:08 +0200 Message-Id: <20211007220013.355530-10-maxime.coquelin@redhat.com> In-Reply-To: <20211007220013.355530-1-maxime.coquelin@redhat.com> References: <20211007220013.355530-1-maxime.coquelin@redhat.com> MIME-Version: 1.0 X-Scanned-By: MIMEDefang 2.79 on 10.5.11.16 Authentication-Results: relay.mimecast.com; auth=pass smtp.auth=CUSA124A263 smtp.mailfrom=maxime.coquelin@redhat.com X-Mimecast-Spam-Score: 0 X-Mimecast-Originator: redhat.com Content-Transfer-Encoding: 8bit Content-Type: text/plain; charset="US-ASCII" Subject: [dpdk-dev] [RFC 09/14] vhost: remove notion of async descriptor X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Now that the IO vector iterators have been simplified, the rte_vhost_async_desc struct only contains a pointer to the iterator array stored in the async metadata. This patch removes it and passes the iterator array pointer directly to the transfer_data callback. Doing that, we avoid declaring the descriptor array on the stack, and also avoid the cost of filling it. 
Signed-off-by: Maxime Coquelin --- examples/vhost/ioat.c | 10 +++++----- examples/vhost/ioat.h | 2 +- lib/vhost/rte_vhost_async.h | 16 ++++------------ lib/vhost/virtio_net.c | 19 ++----------------- 4 files changed, 12 insertions(+), 35 deletions(-) diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c index a8c588deff..9aeeb12fd9 100644 --- a/examples/vhost/ioat.c +++ b/examples/vhost/ioat.c @@ -124,10 +124,10 @@ open_ioat(const char *value) int32_t ioat_transfer_data_cb(int vid, uint16_t queue_id, - struct rte_vhost_async_desc *descs, + struct rte_vhost_iov_iter *iov_iter, struct rte_vhost_async_status *opaque_data, uint16_t count) { - uint32_t i_desc; + uint32_t i_iter; uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id; struct rte_vhost_iov_iter *iter = NULL; unsigned long i_seg; @@ -135,8 +135,8 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id, unsigned short write = cb_tracker[dev_id].next_write; if (!opaque_data) { - for (i_desc = 0; i_desc < count; i_desc++) { - iter = descs[i_desc].iter; + for (i_iter = 0; i_iter < count; i_iter++) { + iter = iov_iter + i_iter; i_seg = 0; if (cb_tracker[dev_id].ioat_space < iter->nr_segs) break; @@ -161,7 +161,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id, /* ring the doorbell */ rte_ioat_perform_ops(dev_id); cb_tracker[dev_id].next_write = write; - return i_desc; + return i_iter; } int32_t diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h index 62e163c585..a4f09ee39b 100644 --- a/examples/vhost/ioat.h +++ b/examples/vhost/ioat.h @@ -29,7 +29,7 @@ int open_ioat(const char *value); int32_t ioat_transfer_data_cb(int vid, uint16_t queue_id, - struct rte_vhost_async_desc *descs, + struct rte_vhost_iov_iter *iov_iter, struct rte_vhost_async_status *opaque_data, uint16_t count); int32_t diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h index 4ea5cfab10..a87ea6ba37 100644 --- a/lib/vhost/rte_vhost_async.h +++ b/lib/vhost/rte_vhost_async.h @@ -26,14 +26,6 @@ struct 
rte_vhost_iov_iter { unsigned long nr_segs; }; -/** - * dma transfer descriptor - */ -struct rte_vhost_async_desc { - /* memory iov_iter */ - struct rte_vhost_iov_iter *iter; -}; - /** * dma transfer status */ @@ -55,17 +47,17 @@ struct rte_vhost_async_channel_ops { * id of vhost device to perform data copies * @param queue_id * queue id to perform data copies - * @param descs - * an array of DMA transfer memory descriptors + * @param iov_iter + * an array of IOV iterators * @param opaque_data * opaque data pair sending to DMA engine * @param count * number of elements in the "descs" array * @return - * number of descs processed, negative value means error + * number of IOV iterators processed, negative value means error */ int32_t (*transfer_data)(int vid, uint16_t queue_id, - struct rte_vhost_async_desc *descs, + struct rte_vhost_iov_iter *iov_iter, struct rte_vhost_async_status *opaque_data, uint16_t count); /** diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c index 5ce4c14a73..b295dc1d39 100644 --- a/lib/vhost/virtio_net.c +++ b/lib/vhost/virtio_net.c @@ -994,15 +994,6 @@ async_iter_reset(struct vhost_async *async) async->iovec_idx = 0; } -static __rte_always_inline void -async_fill_descs(struct vhost_async *async, struct rte_vhost_async_desc *descs) -{ - int i; - - for (i = 0; i < async->iter_idx; i++) - descs[i].iter = async->iov_iter + i; -} - static __rte_always_inline int async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf *m, struct buf_vector *buf_vec, @@ -1549,7 +1540,6 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, uint16_t avail_head; struct vhost_async *async = vq->async; - struct rte_vhost_async_desc async_descs[MAX_PKT_BURST]; struct async_inflight_info *pkts_info = async->pkts_info; uint32_t pkt_err = 0; int32_t n_xfer; @@ -1594,9 +1584,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, if (unlikely(pkt_idx == 0)) return 0; - async_fill_descs(async, async_descs); - - n_xfer = 
async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx); + n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx); if (unlikely(n_xfer < 0)) { VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n", dev->vid, __func__, queue_id); @@ -1811,7 +1799,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, uint16_t num_descs; struct vhost_async *async = vq->async; - struct rte_vhost_async_desc async_descs[MAX_PKT_BURST]; struct async_inflight_info *pkts_info = async->pkts_info; uint32_t pkt_err = 0; uint16_t slot_idx = 0; @@ -1839,9 +1826,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, if (unlikely(pkt_idx == 0)) return 0; - async_fill_descs(async, async_descs); - - n_xfer = async->ops.transfer_data(dev->vid, queue_id, async_descs, 0, pkt_idx); + n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx); if (unlikely(n_xfer < 0)) { VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n", dev->vid, __func__, queue_id); -- 2.31.1