From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 3B04FA3201
	for <public@inbox.dpdk.org>; Mon, 21 Oct 2019 10:01:42 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id E7E121BE92;
	Mon, 21 Oct 2019 10:00:44 +0200 (CEST)
Received: from mga11.intel.com (mga11.intel.com [192.55.52.93])
 by dpdk.org (Postfix) with ESMTP id 65A195B3A
 for <dev@dpdk.org>; Mon, 21 Oct 2019 10:00:19 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from fmsmga002.fm.intel.com ([10.253.24.26])
 by fmsmga102.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 21 Oct 2019 01:00:19 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.67,322,1566889200"; d="scan'208";a="227238088"
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142])
 by fmsmga002.fm.intel.com with ESMTP; 21 Oct 2019 01:00:17 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, tiwei.bie@intel.com, zhihong.wang@intel.com,
 stephen@networkplumber.org, gavin.hu@arm.com
Cc: dev@dpdk.org,
	Marvin Liu <yong.liu@intel.com>
Date: Mon, 21 Oct 2019 23:40:14 +0800
Message-Id: <20191021154016.16274-12-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20191021154016.16274-1-yong.liu@intel.com>
References: <20191015160739.51940-1-yong.liu@intel.com>
 <20191021154016.16274-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v7 11/13] vhost: add packed ring zcopy batch and
	single dequeue
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Add vhost packed ring zero-copy batch and single dequeue functions,
following the structure of the normal (copy) dequeue path.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 2f90c3a7c..e053766f5 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1978,6 +1978,126 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
 	return 0;
 }
 
+static __rte_unused int
+virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
+				 struct vhost_virtqueue *vq,
+				 struct rte_mempool *mbuf_pool,
+				 struct rte_mbuf **pkts)
+{
+	struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
+	uintptr_t desc_addrs[PACKED_BATCH_SIZE];
+	uint16_t ids[PACKED_BATCH_SIZE];
+	uint16_t i;
+
+	uint16_t avail_idx = vq->last_avail_idx;
+
+	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
+					     avail_idx, desc_addrs, ids))
+		return -1;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		zmbufs[i] = get_zmbuf(vq);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (!zmbufs[i])
+			goto free_pkt;
+	}
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		zmbufs[i]->mbuf = pkts[i];
+		zmbufs[i]->desc_idx = avail_idx + i;
+		zmbufs[i]->desc_count = 1;
+	}
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		rte_mbuf_refcnt_update(pkts[i], 1);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
+
+	vq->nr_zmbuf += PACKED_BATCH_SIZE;
+	vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+	return 0;
+
+free_pkt:
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		rte_pktmbuf_free(pkts[i]);
+
+	return -1;
+}
+
+static __rte_unused int
+virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq,
+				  struct rte_mempool *mbuf_pool,
+				  struct rte_mbuf **pkts)
+{
+	uint16_t buf_id, desc_count;
+	struct zcopy_mbuf *zmbuf;
+
+	if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count))
+		return -1;
+
+	zmbuf = get_zmbuf(vq);
+	if (!zmbuf) {
+		rte_pktmbuf_free(*pkts);
+		return -1;
+	}
+	zmbuf->mbuf = *pkts;
+	zmbuf->desc_idx = vq->last_avail_idx;
+	zmbuf->desc_count = desc_count;
+
+	rte_mbuf_refcnt_update(*pkts, 1);
+
+	vq->nr_zmbuf += 1;
+	TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+
+	vq_inc_last_avail_packed(vq, desc_count);
+	return 0;
+}
+
+static __rte_always_inline void
+free_zmbuf(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *next = NULL;
+	struct zcopy_mbuf *zmbuf;
+
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		uint16_t last_used_idx = vq->last_used_idx;
+
+		if (mbuf_is_consumed(zmbuf->mbuf)) {
+			uint16_t flags;
+			flags = vq->desc_packed[last_used_idx].flags;
+			if (vq->used_wrap_counter) {
+				flags |= VRING_DESC_F_USED;
+				flags |= VRING_DESC_F_AVAIL;
+			} else {
+				flags &= ~VRING_DESC_F_USED;
+				flags &= ~VRING_DESC_F_AVAIL;
+			}
+
+			vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
+			vq->desc_packed[last_used_idx].len = 0;
+
+			rte_smp_wmb();
+			vq->desc_packed[last_used_idx].flags = flags;
+
+			vq_inc_last_used_packed(vq, zmbuf->desc_count);
+
+			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+			restore_mbuf(zmbuf->mbuf);
+			rte_pktmbuf_free(zmbuf->mbuf);
+			put_zmbuf(zmbuf);
+			vq->nr_zmbuf -= 1;
+		}
+	}
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-- 
2.17.1