From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id AE4C9A04FD;
	Wed, 26 Oct 2022 12:13:03 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id B75E542BD4;
	Wed, 26 Oct 2022 12:12:33 +0200 (CEST)
Received: from mga14.intel.com (mga14.intel.com [192.55.52.115])
 by mails.dpdk.org (Postfix) with ESMTP id 9043542BB2
 for <dev@dpdk.org>; Wed, 26 Oct 2022 12:12:24 +0200 (CEST)
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple;
 d=intel.com; i=@intel.com; q=dns/txt; s=Intel;
 t=1666779144; x=1698315144;
 h=from:to:cc:subject:date:message-id:in-reply-to:
 references:mime-version:content-transfer-encoding;
 bh=7tjPVrvN5gmT+PUMR8ijMktcf9VPr16PvL/xoq4iEzY=;
 b=CAo+00SJsXTxED1vaNrOON92c7CNVVwLZkM1dqvUXf2HYNKZ7xZgjTZR
 2oT5Ed+1+HywkYS6XkPfEDMyb8PfX6eAKTZwzj+DuRCEP2oZRHsS1jfmU
 YqdCnHCiwOLkvPN6BdcVPjBS9ElJs7r0rn/7/hscWvyRjj4K5G+vxtJjX
 R7MIaz+PSZ0Gcj51j6RAPW9Xjb9OaEd3rrOpWHIWf9NTW0Ffj2Gxgvdl8
 6SvPF1bQmppfnOgUovzAKETylDTo2kgiRtwgUHrGa7xwmvN2jtNRPuyjJ
 x5kji/gbpD/TgenMrGAG42rIM5jbmTtZlEzvm6UPCkyx2AXi7OcsSKmHa g==;
X-IronPort-AV: E=McAfee;i="6500,9779,10511"; a="307904650"
X-IronPort-AV: E=Sophos;i="5.95,214,1661842800"; d="scan'208";a="307904650"
Received: from fmsmga008.fm.intel.com ([10.253.24.58])
 by fmsmga103.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 26 Oct 2022 03:12:24 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=McAfee;i="6500,9779,10511"; a="695306273"
X-IronPort-AV: E=Sophos;i="5.95,214,1661842800"; d="scan'208";a="695306273"
Received: from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])
 by fmsmga008.fm.intel.com with ESMTP; 26 Oct 2022 03:12:22 -0700
From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
 Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH v12 07/18] net/idpf: add support for queue stop
Date: Wed, 26 Oct 2022 18:10:16 +0800
Message-Id: <20221026101027.240583-8-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221026101027.240583-1-junfeng.guo@intel.com>
References: <20221024131227.1062446-2-junfeng.guo@intel.com>
 <20221026101027.240583-1-junfeng.guo@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |  14 ++-
 drivers/net/idpf/idpf_rxtx.c      | 148 ++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h      |  13 +++
 drivers/net/idpf/idpf_vchnl.c     |  69 ++++++++++++++
 5 files changed, 242 insertions(+), 3 deletions(-)
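
Note (not part of the applied diff): the new stop ops are reached through the
generic ethdev queue-control API. A minimal sketch, assuming an already
configured and started idpf port; port_id, queue_id and the helper name are
placeholders for illustration only:

#include <rte_ethdev.h>

/* Hypothetical helper: stop and restart one Rx/Tx queue pair. */
static int
toggle_queue_pair(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	/* Dispatches to the driver's rx/tx_queue_stop ops. */
	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* Queues can be brought back with the existing start ops. */
	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}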

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 46aab2eb61..c25fa5de3f 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -4,6 +4,7 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+Queue start/stop     = Y
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 29bac29bc3..05f087a03c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -59,7 +59,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_stop			= idpf_dev_stop,
 	.dev_close			= idpf_dev_close,
 	.rx_queue_start			= idpf_rx_queue_start,
+	.rx_queue_stop			= idpf_rx_queue_stop,
 	.tx_queue_start			= idpf_tx_queue_start,
+	.tx_queue_stop			= idpf_tx_queue_stop,
 	.rx_queue_setup			= idpf_rx_queue_setup,
 	.tx_queue_setup			= idpf_tx_queue_setup,
 	.dev_infos_get			= idpf_dev_info_get,
@@ -347,22 +349,26 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
 	if (dev->data->mtu > vport->max_mtu) {
 		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-		return -1;
+		goto err_mtu;
 	}
 
 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
 	if (idpf_start_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
-		return -1;
+		goto err_mtu;
 	}
 
 	if (idpf_vc_ena_dis_vport(vport, true) != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
-		return -1;
+		goto err_vport;
 	}
 
 	return 0;
+err_vport:
+	idpf_stop_queues(dev);
+err_mtu:
+	return -1;
 }
 
 static int
@@ -372,6 +378,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_vc_ena_dis_vport(vport, false);
 
+	idpf_stop_queues(dev);
+
 	return 0;
 }
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 8f73e3f3bb..ef11855978 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -71,6 +71,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 	return 0;
 }
 
+static void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_ring == NULL)
+		return;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i] != NULL) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+static void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+	uint16_t nb_desc, i;
+
+	if (txq == NULL || txq->sw_ring == NULL) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	if (txq->sw_nb_desc != 0) {
+		/* For split queue model, descriptor ring */
+		nb_desc = txq->sw_nb_desc;
+	} else {
+		/* For single queue model */
+		nb_desc = txq->nb_tx_desc;
+	}
+	for (i = 0; i < nb_desc; i++) {
+		if (txq->sw_ring[i].mbuf != NULL) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+	}
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+	.release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+	.release_mbufs = release_txq_mbufs,
+};
+
 static void
 reset_split_rx_descq(struct idpf_rx_queue *rxq)
 {
@@ -122,6 +171,14 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 	rxq->bufq2 = NULL;
 }
 
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+	reset_split_rx_descq(rxq);
+	reset_split_rx_bufq(rxq->bufq1);
+	reset_split_rx_bufq(rxq->bufq2);
+}
+
 static void
 reset_single_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -301,6 +358,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
 	bufq->q_set = true;
 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->ops = &def_rxq_ops;
 
 	/* TODO: allow bulk or vec */
 
@@ -527,6 +585,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
 			queue_idx * vport->chunks_info.rx_qtail_spacing);
+	rxq->ops = &def_rxq_ops;
 
 	return 0;
 }
@@ -621,6 +680,7 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	reset_split_tx_descq(txq);
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	/* Allocate the TX completion queue data structure. */
 	txq->complq = rte_zmalloc_socket("idpf splitq cq",
@@ -748,6 +808,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	return 0;
 }
@@ -980,3 +1041,90 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return err;
 }
 
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, rx_queue_id, true, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq->ops->release_mbufs(rxq);
+		reset_single_rx_queue(rxq);
+	} else {
+		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		reset_split_rx_queue(rxq);
+	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, tx_queue_id, false, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->ops->release_mbufs(txq);
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		reset_single_tx_queue(txq);
+	} else {
+		reset_split_tx_descq(txq);
+		reset_split_tx_complq(txq->complq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		if (idpf_rx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		if (idpf_tx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+	}
+}
+
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index ab9b3830fd..b959638489 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -51,6 +51,7 @@ struct idpf_rx_queue {
 	bool q_set;             /* if rx queue has been configured */
 	bool q_started;         /* if rx queue has been started */
 	bool rx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_rxq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint8_t expected_gen_id;
@@ -97,6 +98,7 @@ struct idpf_tx_queue {
 	bool q_set;		/* if tx queue has been configured */
 	bool q_started;		/* if tx queue has been started */
 	bool tx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_txq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint16_t sw_nb_desc;
@@ -107,16 +109,27 @@ struct idpf_tx_queue {
 	struct idpf_tx_queue *complq;
 };
 
+struct idpf_rxq_ops {
+	void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+	void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
 int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp);
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void idpf_stop_queues(struct rte_eth_dev *dev);
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 11d915cf4e..1ba59929a0 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -887,6 +887,75 @@ idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 	return err;
 }
 
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
+int
+idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *queue_select;
+	struct virtchnl2_queue_chunk *queue_chunk;
+	uint32_t type;
+	struct idpf_cmd_info args;
+	uint16_t num_chunks;
+	int err, len;
+
+	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+
+	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (queue_select == NULL)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = num_chunks;
+	queue_select->vport_id = vport->vport_id;
+
+	type = VIRTCHNL2_QUEUE_TYPE_RX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
+	queue_chunk[type].num_queues = vport->num_rx_q;
+
+	type = VIRTCHNL2_QUEUE_TYPE_TX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
+	queue_chunk[type].num_queues = vport->num_tx_q;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.rx_buf_start_qid;
+		queue_chunk[type].num_queues = vport->num_rx_bufq;
+	}
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.tx_compl_start_qid;
+		queue_chunk[type].num_queues = vport->num_tx_complq;
+	}
+
+	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+		VIRTCHNL2_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err != 0)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+			    enable ? "ENABLE" : "DISABLE");
+
+	rte_free(queue_select);
+	return err;
+}
+
 int
 idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {
-- 
2.34.1