From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id B1064A2EFC
	for <public@inbox.dpdk.org>; Thu, 19 Sep 2019 10:58:53 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id A9DD01EB97;
	Thu, 19 Sep 2019 10:57:16 +0200 (CEST)
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by dpdk.org (Postfix) with ESMTP id ACA391E93D
 for <dev@dpdk.org>; Thu, 19 Sep 2019 10:56:48 +0200 (CEST)
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 19 Sep 2019 01:56:48 -0700
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])
 by orsmga001.jf.intel.com with ESMTP; 19 Sep 2019 01:56:46 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com,
	tiwei.bie@intel.com,
	zhihong.wang@intel.com
Cc: dev@dpdk.org,
	Marvin Liu <yong.liu@intel.com>
Date: Fri, 20 Sep 2019 00:36:43 +0800
Message-Id: <20190919163643.24130-17-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190919163643.24130-1-yong.liu@intel.com>
References: <20190905161421.55981-2-yong.liu@intel.com>
 <20190919163643.24130-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v2 16/16] vhost: optimize packed ring dequeue
	when in-order
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

When the VIRTIO_F_IN_ORDER feature is negotiated, vhost can optimize
the dequeue path by updating only the first used descriptor of a
batch: an in-order device uses descriptors in ring order, so the
guest can reclaim the whole batch from that single write-back.
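
As a rough illustration (a standalone sketch with simplified,
hypothetical names, not the vhost code itself), the bookkeeping
boils down to capturing one shadow entry and then only refreshing
its buffer id while last_used_idx advances past each batch:

  /*
   * Hypothetical stand-ins for the vhost_virtqueue fields
   * touched by this patch.
   */
  #include <stdbool.h>
  #include <stdint.h>

  struct ring_state {
          uint16_t size;
          uint16_t last_used_idx;
          bool used_wrap_counter;
  };

  struct used_shadow {
          bool valid;          /* head slot already captured? */
          uint16_t id;         /* buffer id to write back */
          uint16_t head_idx;   /* ring slot of the single write-back */
          bool head_wrap;      /* wrap counter at that slot */
  };

  static void
  record_used_inorder(struct ring_state *q, struct used_shadow *s,
                      uint16_t last_id, uint16_t count)
  {
          s->id = last_id;             /* newest last-buffer id wins */
          if (!s->valid) {             /* capture the head slot once */
                  s->head_idx = q->last_used_idx;
                  s->head_wrap = q->used_wrap_counter;
                  s->valid = true;
          }
          q->last_used_idx += count;   /* whole batch used in order */
          if (q->last_used_idx >= q->size) {
                  q->used_wrap_counter = !q->used_wrap_counter;
                  q->last_used_idx -= q->size;
          }
  }

A flush would then write one used element with s->id at s->head_idx,
which is what the shadow_used_packed[0] handling below does for the
real virtqueue.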

Signed-off-by: Marvin Liu <yong.liu@intel.com>
---

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 357517cdd..a7bb4ec79 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -31,6 +31,12 @@ rxvq_is_mergeable(struct virtio_net *dev)
 	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
 }
 
+static __rte_always_inline bool
+virtio_net_is_inorder(struct virtio_net *dev)
+{
+	return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
+}
+
 static bool
 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
@@ -213,6 +219,31 @@ flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
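+/* In-order dequeue: one shadow used element is enough, so capture it
+ * once, then just refresh its id and step last_used_idx per burst. */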
+static __rte_always_inline void
+update_dequeue_burst_packed_inorder(struct vhost_virtqueue *vq, uint16_t id)
+{
+	vq->shadow_used_packed[0].id = id;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->shadow_used_idx = 1;
+	}
+
+	vq->last_used_idx += PACKED_DESCS_BURST;
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
+
 static __rte_always_inline void
 update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint16_t *ids)
@@ -315,7 +346,6 @@ update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
 		else
 			vq->desc_packed[vq->last_used_idx].flags =
 				VIRTIO_TX_USED_WRAP_FLAG;
-
 	}
 
 	vq->last_used_idx += count;
@@ -326,6 +356,33 @@ update_dequeue_shadow_packed(struct vhost_virtqueue *vq, uint16_t buf_id,
 	}
 }
 
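+/* In-order single dequeue: reuse the one shadow used element, only
+ * refreshing its buffer id and advancing last_used_idx by count. */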
+static __rte_always_inline void
+update_dequeue_shadow_packed_inorder(struct vhost_virtqueue *vq,
+	uint16_t buf_id, uint16_t count)
+{
+	vq->shadow_used_packed[0].id = buf_id;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = count;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->shadow_used_idx = 1;
+	}
+
+	vq->last_used_idx += count;
+
+	if (vq->last_used_idx >= vq->size) {
+		vq->used_wrap_counter ^= 1;
+		vq->last_used_idx -= vq->size;
+	}
+}
 
 static inline void
 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
@@ -1834,7 +1891,12 @@ virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			   pkts[i]->pkt_len);
 	}
 
-	update_dequeue_burst_packed(dev, vq, ids);
+	if (virtio_net_is_inorder(dev))
+		update_dequeue_burst_packed_inorder(vq,
+						    ids[PACKED_BURST_MASK]);
+	else
+		update_dequeue_burst_packed(dev, vq, ids);
+
 	if (virtio_net_with_host_offload(dev)) {
 		UNROLL_PRAGMA(PRAGMA_PARAM)
 		for (i = 0; i < PACKED_DESCS_BURST; i++) {
@@ -1897,7 +1959,10 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 					&desc_count))
 		return -1;
 
-	update_dequeue_shadow_packed(vq, buf_id, desc_count);
+	if (virtio_net_is_inorder(dev))
+		update_dequeue_shadow_packed_inorder(vq, buf_id, desc_count);
+	else
+		update_dequeue_shadow_packed(vq, buf_id, desc_count);
 
 	vq->last_avail_idx += desc_count;
 	if (vq->last_avail_idx >= vq->size) {
-- 
2.17.1