From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 0780DA2EFC
	for <public@inbox.dpdk.org>; Thu, 19 Sep 2019 10:58:11 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 30F241EACE;
	Thu, 19 Sep 2019 10:56:59 +0200 (CEST)
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by dpdk.org (Postfix) with ESMTP id 9EADF1E8BF
 for <dev@dpdk.org>; Thu, 19 Sep 2019 10:56:41 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 19 Sep 2019 01:56:41 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.64,523,1559545200"; d="scan'208";a="271146147"
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])
 by orsmga001.jf.intel.com with ESMTP; 19 Sep 2019 01:56:39 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com,
	tiwei.bie@intel.com,
	zhihong.wang@intel.com
Cc: dev@dpdk.org,
	Marvin Liu <yong.liu@intel.com>
Date: Fri, 20 Sep 2019 00:36:38 +0800
Message-Id: <20190919163643.24130-12-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190919163643.24130-1-yong.liu@intel.com>
References: <20190905161421.55981-2-yong.liu@intel.com>
 <20190919163643.24130-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v2 11/16] vhost: optimize enqueue function of
	packed ring
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Optimize the vhost enqueue datapath for packed ring by splitting it into
separate functions. Packets that fit into a single descriptor are handled
by the burst path, while the others are handled one by one as before.
Prefetch the descriptors in the next two cache lines, as the hardware
will load two cache lines of data automatically.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
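---
For illustration, below is a minimal standalone sketch of the dispatch
loop this patch introduces in virtio_dev_rx_packed(). It is a sketch
under stated assumptions, not part of the patch: rx_burst()/rx_single()
are hypothetical stubs standing in for virtio_dev_rx_burst_packed()/
virtio_dev_rx_single_packed(), BURST_SIZE is assumed to be 4 (one
64-byte cache line of 16-byte packed descriptors), and ring-wrap
handling plus the shadow-ring flush are omitted for brevity.

#include <stdint.h>

/* Assumed stand-in for struct vring_packed_desc: 16 bytes each. */
struct desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Assumed burst size: 64-byte cache line / 16-byte descriptor = 4. */
#define BURST_SIZE 4
/* Mirrors the (last_avail_idx & 0x7) check in the patch: every 8
 * descriptors = 128 bytes = the next two 64-byte cache lines. */
#define PREFETCH_STRIDE 8

static inline void prefetch(const void *p)
{
	__builtin_prefetch(p, 0, 3); /* read access, high temporal locality */
}

/* Hypothetical stubs; the real helpers enqueue mbufs and advance the
 * avail index internally, which is not modeled here. */
static int rx_burst(struct desc *descs, uint16_t idx)
{
	(void)descs; (void)idx;
	return -1; /* pretend the burst path is not usable */
}

static int rx_single(struct desc *descs, uint16_t idx)
{
	(void)descs; (void)idx;
	return 0; /* pretend one packet was enqueued */
}

uint32_t
enqueue_packed(struct desc *descs, uint16_t last_avail_idx, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint32_t remained = count;

	if (count == 0)
		return 0;

	do {
		/* On every 8-descriptor boundary, software-prefetch the
		 * first cache line of the descriptors ahead; the adjacent
		 * line is expected to be loaded by the hardware
		 * prefetcher. Ring wrap is omitted here. */
		if ((last_avail_idx & (PREFETCH_STRIDE - 1)) == 0)
			prefetch(&descs[last_avail_idx + PREFETCH_STRIDE]);

		/* Fast path: a full burst where each packet fits into a
		 * single descriptor. */
		if (remained >= BURST_SIZE &&
				rx_burst(descs, last_avail_idx) == 0) {
			pkt_idx += BURST_SIZE;
			remained -= BURST_SIZE;
			continue;
		}

		/* Slow path: fall back to one packet at a time. */
		if (rx_single(descs, last_avail_idx) < 0)
			break;
		pkt_idx++;
		remained--;
	} while (pkt_idx < count);

	return pkt_idx;
}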

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index c5c86c219..2418b4e45 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -758,64 +758,6 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
-/*
- * Returns -1 on fail, 0 on success
- */
-static inline int
-reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
-				uint32_t size, struct buf_vector *buf_vec,
-				uint16_t *nr_vec, uint16_t *num_buffers,
-				uint16_t *nr_descs)
-{
-	uint16_t avail_idx;
-	uint16_t vec_idx = 0;
-	uint16_t max_tries, tries = 0;
-
-	uint16_t buf_id = 0;
-	uint32_t len = 0;
-	uint16_t desc_count;
-
-	*num_buffers = 0;
-	avail_idx = vq->last_avail_idx;
-
-	if (rxvq_is_mergeable(dev))
-		max_tries = vq->size - 1;
-	else
-		max_tries = 1;
-
-	while (size > 0) {
-		/*
-		 * if we tried all available ring items, and still
-		 * can't get enough buf, it means something abnormal
-		 * happened.
-		 */
-		if (unlikely(++tries > max_tries))
-			return -1;
-
-		if (unlikely(fill_vec_buf_packed(dev, vq,
-						avail_idx, &desc_count,
-						buf_vec, &vec_idx,
-						&buf_id, &len,
-						VHOST_ACCESS_RW) < 0))
-			return -1;
-
-		len = RTE_MIN(len, size);
-		update_shadow_packed(vq, buf_id, len, desc_count);
-		size -= len;
-
-		avail_idx += desc_count;
-		if (avail_idx >= vq->size)
-			avail_idx -= vq->size;
-
-		*nr_descs += desc_count;
-		*num_buffers += 1;
-	}
-
-	*nr_vec = vec_idx;
-
-	return 0;
-}
-
 static __rte_noinline void
 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		struct buf_vector *buf_vec,
@@ -1108,7 +1050,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return pkt_idx;
 }
 
-static __rte_unused __rte_always_inline int
+static __rte_always_inline int
 virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 struct rte_mbuf **pkts)
 {
@@ -1193,7 +1135,7 @@ virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
-static __rte_unused int16_t
+static __rte_always_inline int16_t
 virtio_dev_rx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf *pkt)
 {
@@ -1227,46 +1169,41 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
 {
 	uint32_t pkt_idx = 0;
-	uint16_t num_buffers;
-	struct buf_vector buf_vec[BUF_VECTOR_MAX];
-
-	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
-		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
-		uint16_t nr_vec = 0;
-		uint16_t nr_descs = 0;
+	uint32_t remained = count;
+	uint16_t fetch_idx;
+	int ret;
+	struct vring_packed_desc *descs = vq->desc_packed;
 
-		if (unlikely(reserve_avail_buf_packed(dev, vq,
-						pkt_len, buf_vec, &nr_vec,
-						&num_buffers, &nr_descs) < 0)) {
-			VHOST_LOG_DEBUG(VHOST_DATA,
-				"(%d) failed to get enough desc from vring\n",
-				dev->vid);
-			vq->shadow_used_idx -= num_buffers;
-			break;
+	do {
+		if ((vq->last_avail_idx & 0x7) == 0) {
+			fetch_idx = vq->last_avail_idx + 8;
+			rte_prefetch0((void *)(uintptr_t)&descs[fetch_idx]);
 		}
 
-		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
-			dev->vid, vq->last_avail_idx,
-			vq->last_avail_idx + num_buffers);
+		if (remained >= PACKED_DESCS_BURST) {
+			ret = virtio_dev_rx_burst_packed(dev, vq, pkts);
 
-		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
-						buf_vec, nr_vec,
-						num_buffers) < 0) {
-			vq->shadow_used_idx -= num_buffers;
-			break;
+			if (!ret) {
+				pkt_idx += PACKED_DESCS_BURST;
+				remained -= PACKED_DESCS_BURST;
+				continue;
+			}
 		}
 
-		vq->last_avail_idx += nr_descs;
-		if (vq->last_avail_idx >= vq->size) {
-			vq->last_avail_idx -= vq->size;
-			vq->avail_wrap_counter ^= 1;
-		}
-	}
+		if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
+			break;
 
-	do_data_copy_enqueue(dev, vq);
+		pkt_idx++;
+		remained--;
+
+	} while (pkt_idx < count);
+
+	if (pkt_idx) {
+		if (vq->shadow_used_idx) {
+			do_data_copy_enqueue(dev, vq);
+			flush_enqueue_shadow_packed(dev, vq);
+		}
 
-	if (likely(vq->shadow_used_idx)) {
-		flush_enqueue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
-- 
2.17.1