From: Marvin Liu
To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com,
 stephen@networkplumber.org, gavin.hu@arm.com
Cc: dev@dpdk.org, Marvin Liu
Date: Tue, 22 Oct 2019 06:08:10 +0800
Message-Id: <20191021220813.55236-11-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20191021220813.55236-1-yong.liu@intel.com>
References: <20191021154016.16274-1-yong.liu@intel.com>
 <20191021220813.55236-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v8 10/13] vhost: optimize packed ring enqueue

Optimize the vhost device packed ring enqueue function by splitting it into
batch and single paths. Packets that can fit into one descriptor are handled
by the batch function; all others are handled by the single function, as
before.

Signed-off-by: Marvin Liu
Reviewed-by: Maxime Coquelin

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index b09e03fbc..1c63262ce 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -778,64 +778,6 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
-/*
- * Returns -1 on fail, 0 on success
- */
-static inline int
-reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
-				uint32_t size, struct buf_vector *buf_vec,
-				uint16_t *nr_vec, uint16_t *num_buffers,
-				uint16_t *nr_descs)
-{
-	uint16_t avail_idx;
-	uint16_t vec_idx = 0;
-	uint16_t max_tries, tries = 0;
-
-	uint16_t buf_id = 0;
-	uint32_t len = 0;
-	uint16_t desc_count;
-
-	*num_buffers = 0;
-	avail_idx = vq->last_avail_idx;
-
-	if (rxvq_is_mergeable(dev))
-		max_tries = vq->size - 1;
-	else
-		max_tries = 1;
-
-	while (size > 0) {
-		/*
-		 * if we tried all available ring items, and still
-		 * can't get enough buf, it means something abnormal
-		 * happened.
-		 */
-		if (unlikely(++tries > max_tries))
-			return -1;
-
-		if (unlikely(fill_vec_buf_packed(dev, vq,
-						avail_idx, &desc_count,
-						buf_vec, &vec_idx,
-						&buf_id, &len,
-						VHOST_ACCESS_RW) < 0))
-			return -1;
-
-		len = RTE_MIN(len, size);
-		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
-		size -= len;
-
-		avail_idx += desc_count;
-		if (avail_idx >= vq->size)
-			avail_idx -= vq->size;
-
-		*nr_descs += desc_count;
-		*num_buffers += 1;
-	}
-
-	*nr_vec = vec_idx;
-
-	return 0;
-}
-
 static __rte_noinline void
 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec,
@@ -1118,7 +1060,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return pkt_idx;
 }
 
-static __rte_unused int
+static __rte_always_inline int
 virtio_dev_rx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
 			   struct rte_mbuf **pkts)
@@ -1194,7 +1136,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 	return 0;
 }
 
-static __rte_unused int16_t
+static __rte_always_inline int16_t
 virtio_dev_rx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mbuf *pkt)
@@ -1221,49 +1163,40 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 }
 
 static __rte_noinline uint32_t
-virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		     struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx_packed(struct virtio_net *dev,
+		     struct vhost_virtqueue *vq,
+		     struct rte_mbuf **pkts,
+		     uint32_t count)
 {
 	uint32_t pkt_idx = 0;
-	uint16_t num_buffers;
-	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint32_t remained = count;
 
-	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
-		uint16_t nr_vec = 0;
-		uint16_t nr_descs = 0;
+	do {
+		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
-		if (unlikely(reserve_avail_buf_packed(dev, vq,
-						pkt_len, buf_vec, &nr_vec,
-						&num_buffers, &nr_descs) < 0)) {
-			VHOST_LOG_DEBUG(VHOST_DATA,
-				"(%d) failed to get enough desc from vring\n",
-				dev->vid);
-			vq->shadow_used_idx -= num_buffers;
-			break;
+		if (remained >= PACKED_BATCH_SIZE) {
+			if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+				pkt_idx += PACKED_BATCH_SIZE;
+				remained -= PACKED_BATCH_SIZE;
+				continue;
+			}
 		}
 
-		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
-			dev->vid, vq->last_avail_idx,
-			vq->last_avail_idx + num_buffers);
-
-		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
-						buf_vec, nr_vec,
-						num_buffers) < 0) {
-			vq->shadow_used_idx -= num_buffers;
+		if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
 			break;
-		}
 
+		pkt_idx++;
+		remained--;
-		vq_inc_last_avail_packed(vq, nr_descs);
-	}
-
-	do_data_copy_enqueue(dev, vq);
+	} while (pkt_idx < count);
 
-	if (likely(vq->shadow_used_idx)) {
+	if (vq->shadow_used_idx) {
+		do_data_copy_enqueue(dev, vq);
 		vhost_flush_enqueue_shadow_packed(dev, vq);
-		vhost_vring_call_packed(dev, vq);
 	}
 
+	if (pkt_idx)
+		vhost_vring_call_packed(dev, vq);
+
 	return pkt_idx;
 }
-- 
2.17.1
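
For readers skimming the diff, the control flow introduced by the new
virtio_dev_rx_packed() can be summarized with the following minimal sketch.
It is not part of the patch: PACKED_BATCH_SIZE, rx_batch() and rx_single()
are hypothetical stand-ins for the real DPDK symbols, and all vhost-specific
work (descriptor filling, prefetching, the shadow used ring, the vring call)
is omitted.

/*
 * Illustrative sketch only: batch-or-single dispatch over a burst of
 * packets.  rx_batch() and rx_single() return 0 on success, non-zero on
 * failure, mirroring the convention of the patched functions.
 */
#include <stdint.h>
#include <stdio.h>

#define PACKED_BATCH_SIZE 4	/* assumed batch width for this sketch */

/* Stand-in for virtio_dev_rx_batch_packed(). */
static int rx_batch(void **pkts) { (void)pkts; return 0; }

/* Stand-in for virtio_dev_rx_single_packed(). */
static int rx_single(void *pkt) { (void)pkt; return 0; }

static uint32_t
rx_packed_sketch(void **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;

	/* The real caller guarantees count > 0; guard the sketch anyway. */
	if (count == 0)
		return 0;

	do {
		/* Try the batch fast path while enough packets remain. */
		if (remained >= PACKED_BATCH_SIZE &&
		    rx_batch(&pkts[pkt_idx]) == 0) {
			pkt_idx += PACKED_BATCH_SIZE;
			remained -= PACKED_BATCH_SIZE;
			continue;
		}

		/* Fall back to the per-packet path; stop on failure. */
		if (rx_single(pkts[pkt_idx]))
			break;

		pkt_idx++;
		remained--;
	} while (pkt_idx < count);

	return pkt_idx;	/* number of packets actually enqueued */
}

int
main(void)
{
	void *pkts[10] = { 0 };

	printf("enqueued %u packets\n", rx_packed_sketch(pkts, 10));
	return 0;
}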