From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
 eperezma@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue
 init
Date: Wed, 30 Nov 2022 16:56:29 +0100
Message-Id: <20221130155639.150553-12-maxime.coquelin@redhat.com>
In-Reply-To: <20221130155639.150553-1-maxime.coquelin@redhat.com>
References: <20221130155639.150553-1-maxime.coquelin@redhat.com>

Extract the virtqueue initialization out of the Virtio ethdev queue
initialization, as preliminary work to provide a way for Virtio-user
to allocate its shadow control virtqueue.

The virtqueue allocation and release logic moves into virtqueue.c,
behind the new virtqueue_alloc() and virtqueue_free() helpers.
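
As a rough illustration of how these helpers are meant to be consumed,
a caller like Virtio-user could allocate and release a shadow control
queue along the following lines. This is a hypothetical sketch, not
part of this patch: only virtqueue_alloc() and virtqueue_free() are
introduced here, and shadow_cvq_alloc()/shadow_cvq_free() are made-up
names.

    /* Hypothetical sketch: allocate/release a shadow control queue
     * with the new helpers. Assumes the driver-internal headers are
     * visible to the caller.
     */
    #include "virtqueue.h"
    #include "virtio.h"

    static struct virtqueue *
    shadow_cvq_alloc(struct virtio_hw *hw, uint16_t index, uint16_t num,
            int numa_node)
    {
        /* VTNET_CQ makes virtqueue_alloc() also reserve the control
         * queue header memzone, in addition to the vring itself.
         */
        return virtqueue_alloc(hw, index, num, VTNET_CQ, numa_node,
                "shadow_cvq");
    }

    static void
    shadow_cvq_free(struct virtqueue *vq)
    {
        /* Releases the header memzone, the vring memzone and the vq. */
        virtqueue_free(vq);
    }

Hooking the queue up to the device (hw->cvq, notify callback) remains
the caller's responsibility, as virtio_init_queue() keeps doing below.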

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 261 ++--------------------------
 drivers/net/virtio/virtqueue.c     | 265 +++++++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h     |   5 +
 3 files changed, 281 insertions(+), 250 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 46dd5606f6..8f657d2d90 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -221,173 +221,18 @@ virtio_get_nr_vq(struct virtio_hw *hw)
 	return nr_vq;
 }
 
-static void
-virtio_init_vring(struct virtqueue *vq)
-{
-	int size = vq->vq_nentries;
-	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
-	PMD_INIT_FUNC_TRACE();
-
-	memset(ring_mem, 0, vq->vq_ring_size);
-
-	vq->vq_used_cons_idx = 0;
-	vq->vq_desc_head_idx = 0;
-	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
-	vq->vq_free_cnt = vq->vq_nentries;
-	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-	if (virtio_with_packed_queue(vq->hw)) {
-		vring_init_packed(&vq->vq_packed.ring, ring_mem,
-				  VIRTIO_VRING_ALIGN, size);
-		vring_desc_init_packed(vq, size);
-	} else {
-		struct vring *vr = &vq->vq_split.ring;
-
-		vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
-		vring_desc_init_split(vr->desc, size);
-	}
-	/*
-	 * Disable device(host) interrupting guest
-	 */
-	virtqueue_disable_intr(vq);
-}
-
 static void
 virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
 {
 	virtqueue_notify(vq);
 }
 
-static int
-virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
-{
-	char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
-	const struct rte_memzone **hdr_mz;
-	rte_iova_t *hdr_mem;
-	ssize_t size;
-	int queue_type;
-
-	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
-	switch (queue_type) {
-	case VTNET_TQ:
-		/*
-		 * For each xmit packet, allocate a virtio_net_hdr
-		 * and indirect ring elements
-		 */
-		size = vq->vq_nentries * sizeof(struct virtio_tx_region);
-		hdr_mz = &vq->txq.hdr_mz;
-		hdr_mem = &vq->txq.hdr_mem;
-		break;
-	case VTNET_CQ:
-		/* Allocate a page for control vq command, data and status */
-		size = rte_mem_page_size();
-		hdr_mz = &vq->cq.hdr_mz;
-		hdr_mem = &vq->cq.hdr_mem;
-		break;
-	case VTNET_RQ:
-		/* fallthrough */
-	default:
-		return 0;
-	}
-
-	snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
-	*hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
-			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
-	if (*hdr_mz == NULL) {
-		if (rte_errno == EEXIST)
-			*hdr_mz = rte_memzone_lookup(hdr_name);
-		if (*hdr_mz == NULL)
-			return -ENOMEM;
-	}
-
-	memset((*hdr_mz)->addr, 0, size);
-
-	if (vq->hw->use_va)
-		*hdr_mem = (uintptr_t)(*hdr_mz)->addr;
-	else
-		*hdr_mem = (uintptr_t)(*hdr_mz)->iova;
-
-	return 0;
-}
-
-static void
-virtio_free_queue_headers(struct virtqueue *vq)
-{
-	const struct rte_memzone **hdr_mz;
-	rte_iova_t *hdr_mem;
-	int queue_type;
-
-	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
-	switch (queue_type) {
-	case VTNET_TQ:
-		hdr_mz = &vq->txq.hdr_mz;
-		hdr_mem = &vq->txq.hdr_mem;
-		break;
-	case VTNET_CQ:
-		hdr_mz = &vq->cq.hdr_mz;
-		hdr_mem = &vq->cq.hdr_mem;
-		break;
-	case VTNET_RQ:
-		/* fallthrough */
-	default:
-		return;
-	}
-
-	rte_memzone_free(*hdr_mz);
-	*hdr_mz = NULL;
-	*hdr_mem = 0;
-}
-
-static int
-virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
-{
-	void *sw_ring;
-	struct rte_mbuf *mbuf;
-	size_t size;
-
-	/* SW ring is only used with vectorized datapath */
-	if (!vq->hw->use_vec_rx)
-		return 0;
-
-	size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
-
-	sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
-	if (!sw_ring) {
-		PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
-		return -ENOMEM;
-	}
-
-	mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
-	if (!mbuf) {
-		PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
-		rte_free(sw_ring);
-		return -ENOMEM;
-	}
-
-	vq->rxq.sw_ring = sw_ring;
-	vq->rxq.fake_mbuf = mbuf;
-
-	return 0;
-}
-
-static void
-virtio_rxq_sw_ring_free(struct virtqueue *vq)
-{
-	rte_free(vq->rxq.fake_mbuf);
-	vq->rxq.fake_mbuf = NULL;
-	rte_free(vq->rxq.sw_ring);
-	vq->rxq.sw_ring = NULL;
-}
-
 static int
 virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
-	const struct rte_memzone *mz = NULL;
-	unsigned int vq_size, size;
+	unsigned int vq_size;
 	struct virtio_hw *hw = dev->data->dev_private;
-	struct virtnet_ctl *cvq = NULL;
 	struct virtqueue *vq;
 	int queue_type = virtio_get_queue_type(hw, queue_idx);
 	int ret;
@@ -414,87 +259,19 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
 
-	size = RTE_ALIGN_CEIL(sizeof(*vq) +
-				vq_size * sizeof(struct vq_desc_extra),
-				RTE_CACHE_LINE_SIZE);
-
-
-	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
-				numa_node);
-	if (vq == NULL) {
-		PMD_INIT_LOG(ERR, "can not allocate vq");
+	vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
+	if (!vq) {
+		PMD_INIT_LOG(ERR, "virtqueue init failed");
 		return -ENOMEM;
 	}
-	hw->vqs[queue_idx] = vq;
 
-	vq->hw = hw;
-	vq->vq_queue_index = queue_idx;
-	vq->vq_nentries = vq_size;
-	if (virtio_with_packed_queue(hw)) {
-		vq->vq_packed.used_wrap_counter = 1;
-		vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
-		vq->vq_packed.event_flags_shadow = 0;
-		if (queue_type == VTNET_RQ)
-			vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
-	}
-
-	/*
-	 * Reserve a memzone for vring elements
-	 */
-	size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
-	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
-	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
-		     size, vq->vq_ring_size);
-
-	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
-			numa_node, RTE_MEMZONE_IOVA_CONTIG,
-			VIRTIO_VRING_ALIGN);
-	if (mz == NULL) {
-		if (rte_errno == EEXIST)
-			mz = rte_memzone_lookup(vq_name);
-		if (mz == NULL) {
-			ret = -ENOMEM;
-			goto free_vq;
-		}
-	}
-
-	memset(mz->addr, 0, mz->len);
-
-	vq->mz = mz;
-	if (hw->use_va)
-		vq->vq_ring_mem = (uintptr_t)mz->addr;
-	else
-		vq->vq_ring_mem = mz->iova;
-
-	vq->vq_ring_virt_mem = mz->addr;
-	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
-	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
-
-	virtio_init_vring(vq);
+	hw->vqs[queue_idx] = vq;
 
-	ret = virtio_alloc_queue_headers(vq, numa_node, vq_name);
-	if (ret) {
-		PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
-		goto free_mz;
-	}
-
-	if (queue_type == VTNET_RQ) {
-		ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
-		if (ret)
-			goto free_hdr_mz;
-	} else if (queue_type == VTNET_TQ) {
-		virtqueue_txq_indirect_headers_init(vq);
-	} else if (queue_type == VTNET_CQ) {
-		cvq = &vq->cq;
-		hw->cvq = cvq;
+	if (queue_type == VTNET_CQ) {
+		hw->cvq = &vq->cq;
 		vq->cq.notify_queue = &virtio_control_queue_notify;
 	}
 
-	if (hw->use_va)
-		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
-	else
-		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
-
 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
 		PMD_INIT_LOG(ERR, "setup_queue failed");
 		ret = -EINVAL;
@@ -504,15 +281,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 	return 0;
 
 clean_vq:
-	hw->cvq = NULL;
-	if (queue_type == VTNET_RQ)
-		virtio_rxq_sw_ring_free(vq);
-free_hdr_mz:
-	virtio_free_queue_headers(vq);
-free_mz:
-	rte_memzone_free(mz);
-free_vq:
-	rte_free(vq);
+	if (queue_type == VTNET_CQ)
+		hw->cvq = NULL;
+	virtqueue_free(vq);
 	hw->vqs[queue_idx] = NULL;
 
 	return ret;
@@ -523,7 +294,6 @@ virtio_free_queues(struct virtio_hw *hw)
 {
 	uint16_t nr_vq = virtio_get_nr_vq(hw);
 	struct virtqueue *vq;
-	int queue_type;
 	uint16_t i;
 
 	if (hw->vqs == NULL)
@@ -533,16 +303,7 @@ virtio_free_queues(struct virtio_hw *hw)
 		vq = hw->vqs[i];
 		if (!vq)
 			continue;
-
-		queue_type = virtio_get_queue_type(hw, i);
-		if (queue_type == VTNET_RQ) {
-			rte_free(vq->rxq.fake_mbuf);
-			rte_free(vq->rxq.sw_ring);
-		}
-
-		virtio_free_queue_headers(vq);
-		rte_memzone_free(vq->mz);
-		rte_free(vq);
+		virtqueue_free(vq);
 		hw->vqs[i] = NULL;
 	}
 
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 7a84796513..1d836f2530 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -2,8 +2,12 @@
  * Copyright(c) 2010-2015 Intel Corporation
  */
 #include <stdint.h>
+#include <unistd.h>
 
+#include <rte_eal_paging.h>
+#include <rte_malloc.h>
 #include <rte_mbuf.h>
+#include <rte_memzone.h>
 
 #include "virtqueue.h"
 #include "virtio_logs.h"
@@ -259,3 +263,264 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
 
 	return 0;
 }
+
+static void
+virtio_init_vring(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(ring_mem, 0, vq->vq_ring_size);
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	if (virtio_with_packed_queue(vq->hw)) {
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+				  VIRTIO_VRING_ALIGN, size);
+		vring_desc_init_packed(vq, size);
+	} else {
+		struct vring *vr = &vq->vq_split.ring;
+
+		vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+		vring_desc_init_split(vr->desc, size);
+	}
+	/*
+	 * Disable device(host) interrupting guest
+	 */
+	virtqueue_disable_intr(vq);
+}
+
+static int
+virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
+{
+	char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
+	const struct rte_memzone **hdr_mz;
+	rte_iova_t *hdr_mem;
+	ssize_t size;
+	int queue_type;
+
+	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+	switch (queue_type) {
+	case VTNET_TQ:
+		/*
+		 * For each xmit packet, allocate a virtio_net_hdr
+		 * and indirect ring elements
+		 */
+		size = vq->vq_nentries * sizeof(struct virtio_tx_region);
+		hdr_mz = &vq->txq.hdr_mz;
+		hdr_mem = &vq->txq.hdr_mem;
+		break;
+	case VTNET_CQ:
+		/* Allocate a page for control vq command, data and status */
+		size = rte_mem_page_size();
+		hdr_mz = &vq->cq.hdr_mz;
+		hdr_mem = &vq->cq.hdr_mem;
+		break;
+	case VTNET_RQ:
+		/* fallthrough */
+	default:
+		return 0;
+	}
+
+	snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
+	*hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
+			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
+	if (*hdr_mz == NULL) {
+		if (rte_errno == EEXIST)
+			*hdr_mz = rte_memzone_lookup(hdr_name);
+		if (*hdr_mz == NULL)
+			return -ENOMEM;
+	}
+
+	memset((*hdr_mz)->addr, 0, size);
+
+	if (vq->hw->use_va)
+		*hdr_mem = (uintptr_t)(*hdr_mz)->addr;
+	else
+		*hdr_mem = (uintptr_t)(*hdr_mz)->iova;
+
+	return 0;
+}
+
+static void
+virtio_free_queue_headers(struct virtqueue *vq)
+{
+	const struct rte_memzone **hdr_mz;
+	rte_iova_t *hdr_mem;
+	int queue_type;
+
+	queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+	switch (queue_type) {
+	case VTNET_TQ:
+		hdr_mz = &vq->txq.hdr_mz;
+		hdr_mem = &vq->txq.hdr_mem;
+		break;
+	case VTNET_CQ:
+		hdr_mz = &vq->cq.hdr_mz;
+		hdr_mem = &vq->cq.hdr_mem;
+		break;
+	case VTNET_RQ:
+		/* fallthrough */
+	default:
+		return;
+	}
+
+	rte_memzone_free(*hdr_mz);
+	*hdr_mz = NULL;
+	*hdr_mem = 0;
+}
+
+static int
+virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
+{
+	void *sw_ring;
+	struct rte_mbuf *mbuf;
+	size_t size;
+
+	/* SW ring is only used with vectorized datapath */
+	if (!vq->hw->use_vec_rx)
+		return 0;
+
+	size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
+
+	sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
+	if (!sw_ring) {
+		PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+		return -ENOMEM;
+	}
+
+	mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
+	if (!mbuf) {
+		PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+		rte_free(sw_ring);
+		return -ENOMEM;
+	}
+
+	vq->rxq.sw_ring = sw_ring;
+	vq->rxq.fake_mbuf = mbuf;
+
+	return 0;
+}
+
+static void
+virtio_rxq_sw_ring_free(struct virtqueue *vq)
+{
+	rte_free(vq->rxq.fake_mbuf);
+	vq->rxq.fake_mbuf = NULL;
+	rte_free(vq->rxq.sw_ring);
+	vq->rxq.sw_ring = NULL;
+}
+
+struct virtqueue *
+virtqueue_alloc(struct virtio_hw *hw, uint16_t index, uint16_t num, int type,
+		int node, const char *name)
+{
+	struct virtqueue *vq;
+	const struct rte_memzone *mz;
+	unsigned int size;
+
+	size = sizeof(*vq) + num * sizeof(struct vq_desc_extra);
+	size = RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);
+
+	vq = rte_zmalloc_socket(name, size, RTE_CACHE_LINE_SIZE, node);
+	if (vq == NULL) {
+		PMD_INIT_LOG(ERR, "can not allocate vq");
+		return NULL;
+	}
+
+	vq->hw = hw;
+	vq->vq_queue_index = index;
+	vq->vq_nentries = num;
+	if (virtio_with_packed_queue(hw)) {
+		vq->vq_packed.used_wrap_counter = 1;
+		vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+		vq->vq_packed.event_flags_shadow = 0;
+		if (type == VTNET_RQ)
+			vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+	}
+
+	/*
+	 * Reserve a memzone for vring elements
+	 */
+	size = vring_size(hw, num, VIRTIO_VRING_ALIGN);
+	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
+	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+	mz = rte_memzone_reserve_aligned(name, vq->vq_ring_size, node,
+			RTE_MEMZONE_IOVA_CONTIG, VIRTIO_VRING_ALIGN);
+	if (mz == NULL) {
+		if (rte_errno == EEXIST)
+			mz = rte_memzone_lookup(name);
+		if (mz == NULL)
+			goto free_vq;
+	}
+
+	memset(mz->addr, 0, mz->len);
+	vq->mz = mz;
+	vq->vq_ring_virt_mem = mz->addr;
+
+	if (hw->use_va) {
+		vq->vq_ring_mem = (uintptr_t)mz->addr;
+		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+	} else {
+		vq->vq_ring_mem = mz->iova;
+		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
+
+	virtio_init_vring(vq);
+
+	if (virtio_alloc_queue_headers(vq, node, name)) {
+		PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
+		goto free_mz;
+	}
+
+	switch (type) {
+	case VTNET_RQ:
+		if (virtio_rxq_sw_ring_alloc(vq, node))
+			goto free_hdr_mz;
+		break;
+	case VTNET_TQ:
+		virtqueue_txq_indirect_headers_init(vq);
+		break;
+	}
+
+	return vq;
+
+free_hdr_mz:
+	virtio_free_queue_headers(vq);
+free_mz:
+	rte_memzone_free(mz);
+free_vq:
+	rte_free(vq);
+
+	return NULL;
+}
+
+void
+virtqueue_free(struct virtqueue *vq)
+{
+	int type;
+
+	type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+	switch (type) {
+	case VTNET_RQ:
+		virtio_rxq_sw_ring_free(vq);
+		break;
+	case VTNET_TQ:
+	case VTNET_CQ:
+		virtio_free_queue_headers(vq);
+		break;
+	}
+
+	rte_memzone_free(vq->mz);
+	rte_free(vq);
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d7f8ee79bb..9d4aba11a3 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -385,6 +385,11 @@ int virtqueue_txvq_reset_packed(struct virtqueue *vq);
 
 void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
 
+struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
+		uint16_t num, int type, int node, const char *name);
+
+void virtqueue_free(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.38.1