From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
 Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH v12 08/18] net/idpf: add queue release
Date: Wed, 26 Oct 2022 18:10:17 +0800
Message-Id: <20221026101027.240583-9-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221026101027.240583-1-junfeng.guo@intel.com>
References: <20221024131227.1062446-2-junfeng.guo@intel.com>
 <20221026101027.240583-1-junfeng.guo@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Add support for the Rx/Tx queue release operations:
 - rx_queue_release
 - tx_queue_release

Queue setup now also frees any queue previously configured at the same
index before allocating a new one, so repeated setup calls do not leak
the stale queue (a usage sketch is included in the notes below).

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
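
A rough usage sketch of the re-setup path handled by this patch, assuming
only the public ethdev API (rte_eth_dev_configure() and
rte_eth_rx_queue_setup()); the port id, queue index, descriptor counts,
mempool and the reconfigure_rxq0() helper below are illustrative
placeholders, not part of the patch:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/*
 * Setting up the same Rx queue index twice relies on the release path
 * added here: the idpf Rx setup routines free the stale queue via
 * idpf_rx_queue_release() before reallocating it.
 */
static int
reconfigure_rxq0(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = {0};
	int socket_id = rte_eth_dev_socket_id(port_id);
	int ret;

	/* One Rx and one Tx queue; the port stays stopped throughout. */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* First setup allocates the Rx queue. */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id,
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	/*
	 * Second setup on the same index: the driver releases the previous
	 * queue (mbufs, sw_ring, memzone, queue struct) before creating
	 * the new one, so nothing is leaked.
	 */
	return rte_eth_rx_queue_setup(port_id, 0, 2048, socket_id,
				      NULL, mb_pool);
}

The Tx side mirrors this through idpf_tx_queue_release(); the new
rx/tx_queue_release entries in idpf_eth_dev_ops expose the same cleanup
to the ethdev layer.
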
 drivers/net/idpf/idpf_ethdev.c |  2 +
 drivers/net/idpf/idpf_rxtx.c   | 81 ++++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  3 ++
 3 files changed, 86 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 05f087a03c..dee1e03d43 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -63,7 +63,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.tx_queue_start			= idpf_tx_queue_start,
 	.tx_queue_stop			= idpf_tx_queue_stop,
 	.rx_queue_setup			= idpf_rx_queue_setup,
+	.rx_queue_release		= idpf_dev_rx_queue_release,
 	.tx_queue_setup			= idpf_tx_queue_setup,
+	.tx_queue_release		= idpf_dev_tx_queue_release,
 	.dev_infos_get			= idpf_dev_info_get,
 	.link_update			= idpf_dev_link_update,
 };
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index ef11855978..382f0090f3 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -171,6 +171,51 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 	rxq->bufq2 = NULL;
 }
 
+static void
+idpf_rx_queue_release(void *rxq)
+{
+	struct idpf_rx_queue *q = rxq;
+
+	if (q == NULL)
+		return;
+
+	/* Split queue */
+	if (q->bufq1 != NULL && q->bufq2 != NULL) {
+		q->bufq1->ops->release_mbufs(q->bufq1);
+		rte_free(q->bufq1->sw_ring);
+		rte_memzone_free(q->bufq1->mz);
+		rte_free(q->bufq1);
+		q->bufq2->ops->release_mbufs(q->bufq2);
+		rte_free(q->bufq2->sw_ring);
+		rte_memzone_free(q->bufq2->mz);
+		rte_free(q->bufq2);
+		rte_memzone_free(q->mz);
+		rte_free(q);
+		return;
+	}
+
+	/* Single queue */
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+	struct idpf_tx_queue *q = txq;
+
+	if (q == NULL)
+		return;
+
+	rte_free(q->complq);
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(q);
+}
+
 static inline void
 reset_split_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -392,6 +437,12 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -524,6 +575,12 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_idx] != NULL) {
+		idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		dev->data->rx_queues[queue_idx] = NULL;
+	}
+
 	/* Setup Rx description queue */
 	rxq = rte_zmalloc_socket("idpf rxq",
 				 sizeof(struct idpf_rx_queue),
@@ -630,6 +687,12 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf split txq",
 				 sizeof(struct idpf_tx_queue),
@@ -754,6 +817,12 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
 		return -EINVAL;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_idx] != NULL) {
+		idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		dev->data->tx_queues[queue_idx] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("idpf txq",
 				 sizeof(struct idpf_tx_queue),
@@ -1102,6 +1171,18 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+	idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 void
 idpf_stop_queues(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index b959638489..37944edcac 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -124,12 +124,15 @@ int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 void idpf_stop_queues(struct rte_eth_dev *dev);
 #endif /* _IDPF_RXTX_H_ */
-- 
2.34.1