From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id C16E0A2EFC
	for <public@inbox.dpdk.org>; Tue, 15 Oct 2019 10:29:55 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 063A71E905;
	Tue, 15 Oct 2019 10:29:26 +0200 (CEST)
Received: from mga18.intel.com (mga18.intel.com [134.134.136.126])
 by dpdk.org (Postfix) with ESMTP id 05B511E894
 for <dev@dpdk.org>; Tue, 15 Oct 2019 10:29:07 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by orsmga106.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 15 Oct 2019 01:29:07 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.67,298,1566889200"; d="scan'208";a="279120688"
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])
 by orsmga001.jf.intel.com with ESMTP; 15 Oct 2019 01:29:05 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com,
 stephen@networkplumber.org, gavin.hu@arm.com
Cc: dev@dpdk.org,
	Marvin Liu <yong.liu@intel.com>
Date: Wed, 16 Oct 2019 00:07:33 +0800
Message-Id: <20191015160739.51940-8-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20191015160739.51940-1-yong.liu@intel.com>
References: <20191015143014.1656-1-yong.liu@intel.com>
 <20191015160739.51940-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v6 07/13] vhost: flush enqueue updates by batch
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Buffer vhost packed ring enqueue updates in the shadow used ring and only
flush them once the number of buffered descriptors fills one batch. Thus
virtio can receive packets at a faster rate.
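
To make the idea concrete, here is a minimal standalone sketch of the
batch-flush scheme implemented below; the names (struct shadow_ring,
BATCH_SIZE, BATCH_MASK, the flush callback) are illustrative stand-ins,
not part of the vhost code or API:

#include <stdint.h>

#define BATCH_SIZE 4                 /* stand-in for PACKED_BATCH_SIZE */
#define BATCH_MASK (BATCH_SIZE - 1)  /* stand-in for PACKED_BATCH_MASK */

struct shadow_ring {
	uint16_t used_idx;    /* number of buffered, not yet flushed entries */
	uint16_t aligned_idx; /* progress measured from the batch-aligned start */
};

/*
 * Buffer one completed enqueue; flush the shadow ring only when the
 * buffered descriptors reach a full batch instead of once per packet.
 */
static void
shadow_enqueue(struct shadow_ring *s, uint16_t last_used_idx,
	       uint16_t desc_count, void (*flush)(struct shadow_ring *s))
{
	if (s->used_idx == 0)
		s->aligned_idx = last_used_idx & BATCH_MASK;

	/* record id/len/count of the completed buffer here */
	s->aligned_idx += desc_count;
	s->used_idx++;

	if (s->aligned_idx >= BATCH_SIZE)
		flush(s); /* write back used descriptors, reset used_idx */
}

Flushing once per batch amortizes the write barrier and the descriptor
flag updates over several packets, which is also why the flush routine
below splits its work into two loops around a single rte_smp_wmb().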

Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 96bf763b1..a60b88d89 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -166,6 +166,8 @@ struct vhost_virtqueue {
 		struct vring_used_elem_packed *shadow_used_packed;
 	};
 	uint16_t                shadow_used_idx;
+	/* Batch (cacheline) aligned index of the latest packed ring enqueue desc */
+	uint16_t		shadow_aligned_idx;
 	struct vhost_vring_addr ring_addrs;
 
 	struct batch_copy_elem	*batch_copy_elems;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 274a28f99..020c9b858 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -91,6 +91,69 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
 	vq->shadow_used_split[i].len = len;
 }
 
+static __rte_always_inline void
+vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq)
+{
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < vq->shadow_used_idx; i++) {
+		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
+		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
+
+		used_idx += vq->shadow_used_packed[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	rte_smp_wmb();
+
+	for (i = 0; i < vq->shadow_used_idx; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+
+			vhost_log_cache_used_vring(dev, vq,
+					vq->last_used_idx *
+					sizeof(struct vring_packed_desc),
+					sizeof(struct vring_packed_desc));
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq,
+				head_idx *
+				sizeof(struct vring_packed_desc),
+				sizeof(struct vring_packed_desc));
+
+	vq->shadow_used_idx = 0;
+	vhost_log_cache_sync(dev, vq);
+}
+
 static __rte_always_inline void
 flush_shadow_used_ring_packed(struct virtio_net *dev,
 			struct vhost_virtqueue *vq)
@@ -194,6 +257,33 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq)
 	vq->batch_copy_nb_elems = 0;
 }
 
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t len[],
+				   uint16_t id[],
+				   uint16_t count[],
+				   uint16_t num_buffers)
+{
+	uint16_t i;
+	for (i = 0; i < num_buffers; i++) {
+		/* align shadow ring flush to a batch (cacheline) boundary */
+		if (!vq->shadow_used_idx)
+			vq->shadow_aligned_idx = vq->last_used_idx &
+				PACKED_BATCH_MASK;
+		vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
+		vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
+		vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
+		vq->shadow_aligned_idx += count[i];
+		vq->shadow_used_idx++;
+	}
+
+	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
+		do_data_copy_enqueue(dev, vq);
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+	}
+}
+
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
 	if ((var) != (val))			\
@@ -785,6 +875,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 	uint16_t desc_count;
 	uint32_t size = pkt->pkt_len + dev->vhost_hlen;
 	uint16_t num_buffers = 0;
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
 
 	if (rxvq_is_mergeable(dev))
 		max_tries = vq->size - 1;
@@ -810,6 +903,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 		len = RTE_MIN(len, size);
 		size -= len;
 
+		buffer_len[num_buffers] = len;
+		buffer_buf_id[num_buffers] = buf_id;
+		buffer_desc_count[num_buffers] = desc_count;
 		num_buffers += 1;
 
 		*nr_descs += desc_count;
@@ -821,6 +917,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
 	if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
 		return -1;
 
+	vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
+					   buffer_desc_count, num_buffers);
+
 	return 0;
 }
 
@@ -1017,7 +1116,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	do_data_copy_enqueue(dev, vq);
 
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_used_ring_packed(dev, vq);
+		vhost_flush_enqueue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
-- 
2.17.1