From: Cheng Jiang <Cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,
	yinan.wang@intel.com, yong.liu@intel.com,
	Cheng Jiang <Cheng1.jiang@intel.com>
Date: Mon, 12 Apr 2021 11:34:27 +0000
Message-Id: <20210412113430.17587-2-Cheng1.jiang@intel.com>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20210412113430.17587-1-Cheng1.jiang@intel.com>
References: <20210317085426.10119-1-Cheng1.jiang@intel.com>
	<20210412113430.17587-1-Cheng1.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v5 1/4] vhost: abstract and reorganize async split ring code

To improve code efficiency and readability when async packed ring support
is enabled, this patch abstracts common logic into helper functions such as
shadow_ring_store() and write_back_completed_descs_split(), and improves
the efficiency of some pointer offset calculations.
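For reviewers, the new shadow_ring_store() helper is the standard split copy
into a circular buffer: one copy when the destination range is contiguous,
two when it crosses the end of the ring. Below is a minimal standalone
sketch of that pattern; the ring_store() name, the explicit ring_size
parameter, and plain memcpy() are illustrative stand-ins only, not part of
this patch (the real helper takes the vhost_virtqueue and uses rte_memcpy()).

/*
 * Illustrative sketch only -- not the patch code. Shows the wrap-around
 * copy pattern that shadow_ring_store() implements.
 */
#include <stdint.h>
#include <string.h>

static void
ring_store(void *ring, const void *shadow, uint16_t ring_size,
	   uint16_t s_idx, uint16_t d_idx, uint16_t count,
	   uint16_t elem_size)
{
	if (d_idx + count <= ring_size) {
		/* Destination range is contiguous: single copy. */
		memcpy((uint8_t *)ring + (size_t)d_idx * elem_size,
		       (const uint8_t *)shadow + (size_t)s_idx * elem_size,
		       (size_t)count * elem_size);
	} else {
		/*
		 * Destination wraps: copy up to the end of the ring,
		 * then the remainder to the start of the ring.
		 */
		uint16_t size = ring_size - d_idx;

		memcpy((uint8_t *)ring + (size_t)d_idx * elem_size,
		       (const uint8_t *)shadow + (size_t)s_idx * elem_size,
		       (size_t)size * elem_size);
		memcpy(ring,
		       (const uint8_t *)shadow +
				(size_t)(s_idx + size) * elem_size,
		       (size_t)(count - size) * elem_size);
	}
}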
Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/librte_vhost/virtio_net.c | 146 +++++++++++++++++++---------------
 1 file changed, 84 insertions(+), 62 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff3987860..c43ab0093 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1458,6 +1458,29 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
 		(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
 }
 
+static __rte_always_inline void
+shadow_ring_store(struct vhost_virtqueue *vq, void *shadow_ring, void *d_ring,
+		uint16_t s_idx, uint16_t d_idx,
+		uint16_t count, uint16_t elem_size)
+{
+	if (d_idx + count <= vq->size) {
+		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
+			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
+			count * elem_size);
+	} else {
+		uint16_t size = vq->size - d_idx;
+
+		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
+			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
+			size * elem_size);
+
+		rte_memcpy((void *)((uintptr_t)d_ring),
+			(void *)((uintptr_t)shadow_ring +
+				(s_idx + size) * elem_size),
+			(count - size) * elem_size);
+	}
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, uint16_t queue_id,
@@ -1478,6 +1501,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
 	uint16_t slot_idx = 0;
 	uint16_t segs_await = 0;
+	uint16_t iovec_idx = 0, it_idx = 0;
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
@@ -1513,27 +1537,32 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 		if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
 				buf_vec, nr_vec, num_buffers,
-				src_iovec, dst_iovec, src_it, dst_it) < 0) {
+				&src_iovec[iovec_idx],
+				&dst_iovec[iovec_idx],
+				&src_it[it_idx],
+				&dst_it[it_idx]) < 0) {
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
 		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
 			(vq->size - 1);
-		if (src_it->count) {
+		if (src_it[it_idx].count) {
 			uint16_t from, to;
 
-			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+			async_fill_desc(&tdes[pkt_burst_idx++],
+				&src_it[it_idx],
+				&dst_it[it_idx]);
 			pkts_info[slot_idx].descs = num_buffers;
 			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
 			async_pkts_log[num_async_pkts++].last_avail_idx =
 				vq->last_avail_idx;
-			src_iovec += src_it->nr_segs;
-			dst_iovec += dst_it->nr_segs;
-			src_it += 2;
-			dst_it += 2;
-			segs_await += src_it->nr_segs;
+
+			iovec_idx += src_it[it_idx].nr_segs;
+			it_idx += 2;
+
+			segs_await += src_it[it_idx].nr_segs;
 
 			/**
 			 * recover shadow used ring and keep DMA-occupied
@@ -1541,23 +1570,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			 */
 			from = vq->shadow_used_idx - num_buffers;
 			to = vq->async_desc_idx & (vq->size - 1);
-			if (num_buffers + to <= vq->size) {
-				rte_memcpy(&vq->async_descs_split[to],
-					&vq->shadow_used_split[from],
-					num_buffers *
-					sizeof(struct vring_used_elem));
-			} else {
-				int size = vq->size - to;
-
-				rte_memcpy(&vq->async_descs_split[to],
-					&vq->shadow_used_split[from],
-					size *
-					sizeof(struct vring_used_elem));
-				rte_memcpy(vq->async_descs_split,
-					&vq->shadow_used_split[from +
-					size], (num_buffers - size) *
-					sizeof(struct vring_used_elem));
-			}
+
+			shadow_ring_store(vq, vq->shadow_used_split,
+					vq->async_descs_split,
+					from, to, num_buffers,
+					sizeof(struct vring_used_elem));
+
 			vq->async_desc_idx += num_buffers;
 			vq->shadow_used_idx -= num_buffers;
 		} else
@@ -1575,10 +1593,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			BUF_VECTOR_MAX))) {
 			n_pkts = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
-			src_iovec = vec_pool;
-			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
-			src_it = it_pool;
-			dst_it = it_pool + 1;
+			iovec_idx = 0;
+			it_idx = 0;
+
 			segs_await = 0;
 			vq->async_pkts_inflight_n += n_pkts;
 
@@ -1639,6 +1656,43 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 	return pkt_idx;
 }
 
+static __rte_always_inline void
+write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
+{
+	uint16_t nr_left = n_descs;
+	uint16_t nr_copy;
+	uint16_t to, from;
+
+	do {
+		from = vq->last_async_desc_idx & (vq->size - 1);
+		nr_copy = nr_left + from <= vq->size ? nr_left :
+			vq->size - from;
+		to = vq->last_used_idx & (vq->size - 1);
+
+		if (to + nr_copy <= vq->size) {
+			rte_memcpy(&vq->used->ring[to],
+					&vq->async_descs_split[from],
+					nr_copy *
+					sizeof(struct vring_used_elem));
+		} else {
+			uint16_t size = vq->size - to;
+
+			rte_memcpy(&vq->used->ring[to],
+					&vq->async_descs_split[from],
+					size *
+					sizeof(struct vring_used_elem));
+			rte_memcpy(vq->used->ring,
+					&vq->async_descs_split[from +
+					size], (nr_copy - size) *
+					sizeof(struct vring_used_elem));
+		}
+
+		vq->last_async_desc_idx += nr_copy;
+		vq->last_used_idx += nr_copy;
+		nr_left -= nr_copy;
+	} while (nr_left > 0);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
@@ -1695,39 +1749,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	vq->async_pkts_inflight_n -= n_pkts_put;
 	if (likely(vq->enabled && vq->access_ok)) {
-		uint16_t nr_left = n_descs;
-		uint16_t nr_copy;
-		uint16_t to;
-
-		/* write back completed descriptors to used ring */
-		do {
-			from = vq->last_async_desc_idx & (vq->size - 1);
-			nr_copy = nr_left + from <= vq->size ? nr_left :
-				vq->size - from;
-			to = vq->last_used_idx & (vq->size - 1);
-
-			if (to + nr_copy <= vq->size) {
-				rte_memcpy(&vq->used->ring[to],
-						&vq->async_descs_split[from],
-						nr_copy *
-						sizeof(struct vring_used_elem));
-			} else {
-				uint16_t size = vq->size - to;
-
-				rte_memcpy(&vq->used->ring[to],
-						&vq->async_descs_split[from],
-						size *
-						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->used->ring,
-						&vq->async_descs_split[from +
-						size], (nr_copy - size) *
-						sizeof(struct vring_used_elem));
-			}
-
-			vq->last_async_desc_idx += nr_copy;
-			vq->last_used_idx += nr_copy;
-			nr_left -= nr_copy;
-		} while (nr_left > 0);
+		write_back_completed_descs_split(vq, n_descs);
 
 		__atomic_add_fetch(&vq->used->idx, n_descs,
 				__ATOMIC_RELEASE);
 		vhost_vring_call_split(dev, vq);
-- 
2.29.2
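For readers following the refactor: write_back_completed_descs_split()
performs two-level wrap handling. The outer loop drains the completed
descriptors in chunks bounded by the wrap point of the source (async
descriptor) ring, and each chunk's copy into the used ring is itself split
when the destination wraps. The standalone sketch below restates that logic
under hypothetical names (struct elem, copy_ring(), write_back()); it is
not the patch code, which operates on struct vring_used_elem with
rte_memcpy().

/*
 * Illustrative sketch only -- not the patch code.
 */
#include <stdint.h>
#include <string.h>

struct elem { uint32_t id; uint32_t len; };	/* stand-in for vring_used_elem */

/* Copy n elems into dst at index `to`, splitting on destination wrap. */
static void
copy_ring(struct elem *dst, const struct elem *src, uint16_t ring_size,
	  uint16_t to, uint16_t n)
{
	uint16_t first = (to + n <= ring_size) ?
		n : (uint16_t)(ring_size - to);

	memcpy(&dst[to], src, first * sizeof(*src));
	if (first < n)
		memcpy(dst, src + first,
		       (uint16_t)(n - first) * sizeof(*src));
}

/* Drain n_descs completed elems from the async ring into the used ring. */
static void
write_back(struct elem *used_ring, const struct elem *async_ring,
	   uint16_t ring_size, uint16_t *async_idx, uint16_t *used_idx,
	   uint16_t n_descs)
{
	uint16_t nr_left = n_descs;

	do {
		uint16_t from = *async_idx & (ring_size - 1);
		/* Each chunk is bounded by the source ring's wrap point. */
		uint16_t nr_copy = (nr_left + from <= ring_size) ?
			nr_left : (uint16_t)(ring_size - from);
		uint16_t to = *used_idx & (ring_size - 1);

		copy_ring(used_ring, &async_ring[from], ring_size, to, nr_copy);

		/* Indices are free-running; they are masked on each use. */
		*async_idx += nr_copy;
		*used_idx += nr_copy;
		nr_left -= nr_copy;
	} while (nr_left > 0);
}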