From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 9EC9BA2EFC
	for <public@inbox.dpdk.org>; Thu, 19 Sep 2019 10:58:20 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 39B481EADD;
	Thu, 19 Sep 2019 10:57:02 +0200 (CEST)
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by dpdk.org (Postfix) with ESMTP id 0F3601E8BF
 for <dev@dpdk.org>; Thu, 19 Sep 2019 10:56:42 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga001.jf.intel.com ([10.7.209.18])
 by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 19 Sep 2019 01:56:42 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.64,523,1559545200"; d="scan'208";a="271146153"
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])
 by orsmga001.jf.intel.com with ESMTP; 19 Sep 2019 01:56:41 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com,
	tiwei.bie@intel.com,
	zhihong.wang@intel.com
Cc: dev@dpdk.org,
	Marvin Liu <yong.liu@intel.com>
Date: Fri, 20 Sep 2019 00:36:39 +0800
Message-Id: <20190919163643.24130-13-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190919163643.24130-1-yong.liu@intel.com>
References: <20190905161421.55981-2-yong.liu@intel.com>
 <20190919163643.24130-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v2 12/16] vhost: add burst and single zero copy
	dequeue functions
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Optimize the vhost zero-copy dequeue path along the same lines as the
normal dequeue path: add a burst function that dequeues
PACKED_DESCS_BURST descriptors at a time, a single-descriptor variant,
and free_zmbuf() to return the descriptors of consumed zero-copy mbufs
to the guest.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
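
For illustration only (not part of the patch): a caller could drive the
two new helpers and free_zmbuf() roughly as below. The loop shape mirrors
the non-zero-copy packed dequeue path. The helpers are still marked
__rte_unused here, so the wrapper name and the vhost_vring_call_packed()
notification in this sketch are assumptions about how a later patch in
the series wires them up, not code from this patch.

	/* Hypothetical driver loop, sketched against the helpers below. */
	static uint16_t
	tx_packed_zmbuf_sketch(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct rte_mempool *mbuf_pool,
			struct rte_mbuf **pkts, uint32_t count)
	{
		uint32_t pkt_idx = 0;
		uint32_t remained = count;

		/* Reclaim descriptors of mbufs the app already freed. */
		free_zmbuf(vq);

		do {
			/* Prefer the burst path when enough packets remain. */
			if (remained >= PACKED_DESCS_BURST &&
			    !virtio_dev_tx_burst_packed_zmbuf(dev, vq,
					mbuf_pool, &pkts[pkt_idx])) {
				pkt_idx += PACKED_DESCS_BURST;
				remained -= PACKED_DESCS_BURST;
				continue;
			}

			/* Fall back to dequeuing one descriptor at a time. */
			if (virtio_dev_tx_single_packed_zmbuf(dev, vq,
					mbuf_pool, &pkts[pkt_idx]))
				break;
			pkt_idx++;
			remained--;
		} while (remained);

		if (pkt_idx)
			vhost_vring_call_packed(dev, vq);

		return pkt_idx;
	}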

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 2418b4e45..a8df74f87 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1909,6 +1909,144 @@ virtio_dev_tx_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static __rte_unused __rte_always_inline int
+virtio_dev_tx_burst_packed_zmbuf(struct virtio_net *dev,
+					struct vhost_virtqueue *vq,
+					struct rte_mempool *mbuf_pool,
+					struct rte_mbuf **pkts)
+{
+	struct zcopy_mbuf *zmbufs[PACKED_DESCS_BURST];
+	uintptr_t desc_addrs[PACKED_DESCS_BURST];
+	uint16_t ids[PACKED_DESCS_BURST];
+	int ret;
+	uint16_t i;
+
+	uint16_t avail_idx = vq->last_avail_idx;
+
+	ret = vhost_dequeue_burst_packed(dev, vq, mbuf_pool, pkts, avail_idx,
+					desc_addrs, ids);
+
+	if (ret)
+		return ret;
+
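+	/*
+	 * Reserve one zero-copy metadata slot per packet. Allocation and
+	 * the NULL check run in two separate loops so both can be unrolled.
+	 */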
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++)
+		zmbufs[i] = get_zmbuf(vq);
+
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++) {
+		if (!zmbufs[i])
+			goto free_pkt;
+	}
+
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++) {
+		zmbufs[i]->mbuf = pkts[i];
+		zmbufs[i]->desc_idx = avail_idx + i;
+		zmbufs[i]->desc_count = 1;
+	}
+
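+	/*
+	 * Hold an extra reference so the mbuf is not recycled while the
+	 * guest buffer it points into is still in flight; free_zmbuf()
+	 * treats refcnt back at one as consumed and then releases it.
+	 */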
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++)
+		rte_mbuf_refcnt_update(pkts[i], 1);
+
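+	/* Track the outstanding mbufs so free_zmbuf() can reclaim them. */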
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++)
+		TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
+
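+	/* Advance past the burst, flipping the wrap counter on ring wrap. */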
+	vq->nr_zmbuf += PACKED_DESCS_BURST;
+	vq->last_avail_idx += PACKED_DESCS_BURST;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 0;
+
+free_pkt:
+	UNROLL_PRAGMA(PRAGMA_PARAM)
+	for (i = 0; i < PACKED_DESCS_BURST; i++)
+		rte_pktmbuf_free(pkts[i]);
+
+	return -1;
+}
+
+static __rte_unused int
+virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
+	struct rte_mbuf **pkts)
+{
+	uint16_t buf_id, desc_count;
+	struct zcopy_mbuf *zmbuf;
+
+	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count))
+		return -1;
+
+	zmbuf = get_zmbuf(vq);
+	if (!zmbuf) {
+		rte_pktmbuf_free(*pkts);
+		return -1;
+	}
+	zmbuf->mbuf = *pkts;
+	zmbuf->desc_idx = vq->last_avail_idx;
+	zmbuf->desc_count = desc_count;
+
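+	/* Same extra reference as in the burst path; dropped once consumed. */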
+	rte_mbuf_refcnt_update(*pkts, 1);
+
+	vq->nr_zmbuf += 1;
+	TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+
+	vq->last_avail_idx += desc_count;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 0;
+}
+
+static __rte_unused void
+free_zmbuf(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *next = NULL;
+	struct zcopy_mbuf *zmbuf;
+
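+	/*
+	 * Return the descriptors of zero-copy mbufs that the application
+	 * has finished with and recycle their metadata slots.
+	 */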
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		uint16_t last_used_idx = vq->last_used_idx;
+
+		if (mbuf_is_consumed(zmbuf->mbuf)) {
+			uint16_t flags = 0;
+
+			if (vq->used_wrap_counter)
+				flags = VIRTIO_TX_USED_FLAG;
+			else
+				flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+			vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
+			vq->desc_packed[last_used_idx].len = 0;
+
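+			/*
+			 * The id/len stores must be visible before the
+			 * flags store hands the descriptor back to the guest.
+			 */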
+			rte_smp_wmb();
+			vq->desc_packed[last_used_idx].flags = flags;
+
+			vq->last_used_idx += zmbuf->desc_count;
+			if (vq->last_used_idx >= vq->size) {
+				vq->used_wrap_counter ^= 1;
+				vq->last_used_idx -= vq->size;
+			}
+
+			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+			restore_mbuf(zmbuf->mbuf);
+			rte_pktmbuf_free(zmbuf->mbuf);
+			put_zmbuf(zmbuf);
+			vq->nr_zmbuf -= 1;
+		}
+	}
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-- 
2.17.1