From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com,
	Beilei Xing <beilei.xing@intel.com>,
	Wenjun Wu <wenjun1.wu@intel.com>
Subject: [PATCH v3 05/15] common/idpf: add vport init/deinit
Date: Tue, 17 Jan 2023 07:26:13 +0000	[thread overview]
Message-ID: <20230117072626.93796-7-beilei.xing@intel.com> (raw)
In-Reply-To: <20230117072626.93796-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Introduce the idpf_vport_init and idpf_vport_deinit functions in the
common module. They take over the vport setup and teardown previously
open-coded in the net/idpf driver: creating and destroying the vport
over virtchnl, parsing the queue chunk info, and allocating and
freeing the RSS key and LUT.
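
A minimal usage sketch (illustrative only, based on the idpf_ethdev.c
changes below; the example_* wrapper names and the include paths are
assumptions, not part of this patch):

    #include <rte_ethdev.h>
    #include <idpf_common_device.h>

    /* Hypothetical probe-path helper: create and initialize a vport
     * through the new common API.  vport and vport_req_info are
     * prepared by the caller, as in idpf_dev_vport_init().
     */
    static int
    example_vport_setup(struct idpf_vport *vport,
                        struct virtchnl2_create_vport *vport_req_info,
                        struct rte_eth_dev *dev)
    {
            /* Creates the vport over virtchnl, parses the queue chunk
             * info and allocates the RSS key/LUT in the common module. */
            return idpf_vport_init(vport, vport_req_info, dev->data);
    }

    /* Hypothetical close-path counterpart: frees the RSS key/LUT and
     * destroys the vport over virtchnl. */
    static void
    example_vport_teardown(struct idpf_vport *vport)
    {
            idpf_vport_deinit(vport);
    }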

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.c   | 115 +++++++++++++++++
 drivers/common/idpf/idpf_common_device.h   |  13 +-
 drivers/common/idpf/idpf_common_virtchnl.c |  18 +--
 drivers/common/idpf/version.map            |   2 +
 drivers/net/idpf/idpf_ethdev.c             | 138 ++-------------------
 5 files changed, 148 insertions(+), 138 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index b2b42443e4..5628fb5c57 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -158,4 +158,119 @@ idpf_adapter_deinit(struct idpf_adapter *adapter)
 	return 0;
 }
 
+int
+idpf_vport_init(struct idpf_vport *vport,
+		struct virtchnl2_create_vport *create_vport_info,
+		void *dev_data)
+{
+	struct virtchnl2_create_vport *vport_info;
+	int i, type, ret;
+
+	ret = idpf_vc_create_vport(vport, create_vport_info);
+	if (ret != 0) {
+		DRV_LOG(ERR, "Failed to create vport.");
+		goto err_create_vport;
+	}
+
+	vport_info = &(vport->vport_info.info);
+	vport->vport_id = vport_info->vport_id;
+	vport->txq_model = vport_info->txq_model;
+	vport->rxq_model = vport_info->rxq_model;
+	vport->num_tx_q = vport_info->num_tx_q;
+	vport->num_tx_complq = vport_info->num_tx_complq;
+	vport->num_rx_q = vport_info->num_rx_q;
+	vport->num_rx_bufq = vport_info->num_rx_bufq;
+	vport->max_mtu = vport_info->max_mtu;
+	rte_memcpy(vport->default_mac_addr,
+		   vport_info->default_mac_addr, ETH_ALEN);
+	vport->rss_algorithm = vport_info->rss_algorithm;
+	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+				      vport_info->rss_key_size);
+	vport->rss_lut_size = vport_info->rss_lut_size;
+
+	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+		type = vport_info->chunks.chunks[i].type;
+		switch (type) {
+		case VIRTCHNL2_QUEUE_TYPE_TX:
+			vport->chunks_info.tx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->chunks_info.tx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->chunks_info.tx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+			break;
+		case VIRTCHNL2_QUEUE_TYPE_RX:
+			vport->chunks_info.rx_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->chunks_info.rx_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->chunks_info.rx_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+			break;
+		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+			vport->chunks_info.tx_compl_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->chunks_info.tx_compl_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->chunks_info.tx_compl_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+			break;
+		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+			vport->chunks_info.rx_buf_start_qid =
+				vport_info->chunks.chunks[i].start_queue_id;
+			vport->chunks_info.rx_buf_qtail_start =
+				vport_info->chunks.chunks[i].qtail_reg_start;
+			vport->chunks_info.rx_buf_qtail_spacing =
+				vport_info->chunks.chunks[i].qtail_reg_spacing;
+			break;
+		default:
+			DRV_LOG(ERR, "Unsupported queue type");
+			break;
+		}
+	}
+
+	vport->dev_data = dev_data;
+
+	vport->rss_key = rte_zmalloc("rss_key",
+				     vport->rss_key_size, 0);
+	if (vport->rss_key == NULL) {
+		DRV_LOG(ERR, "Failed to allocate RSS key");
+		ret = -ENOMEM;
+		goto err_rss_key;
+	}
+
+	vport->rss_lut = rte_zmalloc("rss_lut",
+				     sizeof(uint32_t) * vport->rss_lut_size, 0);
+	if (vport->rss_lut == NULL) {
+		DRV_LOG(ERR, "Failed to allocate RSS lut");
+		ret = -ENOMEM;
+		goto err_rss_lut;
+	}
+
+	return 0;
+
+err_rss_lut:
+	vport->dev_data = NULL;
+	rte_free(vport->rss_key);
+	vport->rss_key = NULL;
+err_rss_key:
+	idpf_vc_destroy_vport(vport);
+err_create_vport:
+	return ret;
+}
+int
+idpf_vport_deinit(struct idpf_vport *vport)
+{
+	rte_free(vport->rss_lut);
+	vport->rss_lut = NULL;
+
+	rte_free(vport->rss_key);
+	vport->rss_key = NULL;
+
+	vport->dev_data = NULL;
+
+	idpf_vc_destroy_vport(vport);
+
+	return 0;
+}
 RTE_LOG_REGISTER_SUFFIX(idpf_common_logtype, common, NOTICE);
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index e4344ea392..14d04268e5 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -9,6 +9,8 @@
 #include <base/virtchnl2.h>
 #include <idpf_common_logs.h>
 
+#define IDPF_RSS_KEY_LEN	52
+
 #define IDPF_CTLQ_ID		-1
 #define IDPF_CTLQ_LEN		64
 #define IDPF_DFLT_MBX_BUF_SIZE	4096
@@ -43,7 +45,10 @@ struct idpf_chunks_info {
 
 struct idpf_vport {
 	struct idpf_adapter *adapter; /* Backreference to associated adapter */
-	struct virtchnl2_create_vport *vport_info; /* virtchnl response info handling */
+	union {
+		struct virtchnl2_create_vport info; /* virtchnl response info handling */
+		uint8_t data[IDPF_DFLT_MBX_BUF_SIZE];
+	} vport_info;
 	uint16_t sw_idx; /* SW index in adapter->vports[]*/
 	uint16_t vport_id;
 	uint32_t txq_model;
@@ -142,5 +147,11 @@ __rte_internal
 int idpf_adapter_init(struct idpf_adapter *adapter);
 __rte_internal
 int idpf_adapter_deinit(struct idpf_adapter *adapter);
+__rte_internal
+int idpf_vport_init(struct idpf_vport *vport,
+		    struct virtchnl2_create_vport *vport_req_info,
+		    void *dev_data);
+__rte_internal
+int idpf_vport_deinit(struct idpf_vport *vport);
 
 #endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 2e94a95876..1531adccef 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -355,7 +355,7 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
 
 int
 idpf_vc_create_vport(struct idpf_vport *vport,
-		     struct virtchnl2_create_vport *vport_req_info)
+		     struct virtchnl2_create_vport *create_vport_info)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_create_vport vport_msg;
@@ -363,13 +363,13 @@ idpf_vc_create_vport(struct idpf_vport *vport,
 	int err = -1;
 
 	memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
-	vport_msg.vport_type = vport_req_info->vport_type;
-	vport_msg.txq_model = vport_req_info->txq_model;
-	vport_msg.rxq_model = vport_req_info->rxq_model;
-	vport_msg.num_tx_q = vport_req_info->num_tx_q;
-	vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
-	vport_msg.num_rx_q = vport_req_info->num_rx_q;
-	vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
+	vport_msg.vport_type = create_vport_info->vport_type;
+	vport_msg.txq_model = create_vport_info->txq_model;
+	vport_msg.rxq_model = create_vport_info->rxq_model;
+	vport_msg.num_tx_q = create_vport_info->num_tx_q;
+	vport_msg.num_tx_complq = create_vport_info->num_tx_complq;
+	vport_msg.num_rx_q = create_vport_info->num_rx_q;
+	vport_msg.num_rx_bufq = create_vport_info->num_rx_bufq;
 
 	memset(&args, 0, sizeof(args));
 	args.ops = VIRTCHNL2_OP_CREATE_VPORT;
@@ -385,7 +385,7 @@ idpf_vc_create_vport(struct idpf_vport *vport,
 		return err;
 	}
 
-	rte_memcpy(vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+	rte_memcpy(&(vport->vport_info.info), args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
 	return 0;
 }
 
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 7259dcf8a4..680a69822c 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -25,6 +25,8 @@ INTERNAL {
 	idpf_vc_set_rss_hash;
 	idpf_vc_set_rss_key;
 	idpf_vc_set_rss_lut;
+	idpf_vport_deinit;
+	idpf_vport_init;
 
 	local: *;
 };
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index c17c7bb472..7a8fb6fd4a 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -178,73 +178,6 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev,
 	return 0;
 }
 
-#define IDPF_RSS_KEY_LEN 52
-
-static int
-idpf_init_vport(struct idpf_vport *vport)
-{
-	struct virtchnl2_create_vport *vport_info = vport->vport_info;
-	int i, type;
-
-	vport->vport_id = vport_info->vport_id;
-	vport->txq_model = vport_info->txq_model;
-	vport->rxq_model = vport_info->rxq_model;
-	vport->num_tx_q = vport_info->num_tx_q;
-	vport->num_tx_complq = vport_info->num_tx_complq;
-	vport->num_rx_q = vport_info->num_rx_q;
-	vport->num_rx_bufq = vport_info->num_rx_bufq;
-	vport->max_mtu = vport_info->max_mtu;
-	rte_memcpy(vport->default_mac_addr,
-		   vport_info->default_mac_addr, ETH_ALEN);
-	vport->rss_algorithm = vport_info->rss_algorithm;
-	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
-				     vport_info->rss_key_size);
-	vport->rss_lut_size = vport_info->rss_lut_size;
-
-	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
-		type = vport_info->chunks.chunks[i].type;
-		switch (type) {
-		case VIRTCHNL2_QUEUE_TYPE_TX:
-			vport->chunks_info.tx_start_qid =
-				vport_info->chunks.chunks[i].start_queue_id;
-			vport->chunks_info.tx_qtail_start =
-				vport_info->chunks.chunks[i].qtail_reg_start;
-			vport->chunks_info.tx_qtail_spacing =
-				vport_info->chunks.chunks[i].qtail_reg_spacing;
-			break;
-		case VIRTCHNL2_QUEUE_TYPE_RX:
-			vport->chunks_info.rx_start_qid =
-				vport_info->chunks.chunks[i].start_queue_id;
-			vport->chunks_info.rx_qtail_start =
-				vport_info->chunks.chunks[i].qtail_reg_start;
-			vport->chunks_info.rx_qtail_spacing =
-				vport_info->chunks.chunks[i].qtail_reg_spacing;
-			break;
-		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
-			vport->chunks_info.tx_compl_start_qid =
-				vport_info->chunks.chunks[i].start_queue_id;
-			vport->chunks_info.tx_compl_qtail_start =
-				vport_info->chunks.chunks[i].qtail_reg_start;
-			vport->chunks_info.tx_compl_qtail_spacing =
-				vport_info->chunks.chunks[i].qtail_reg_spacing;
-			break;
-		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
-			vport->chunks_info.rx_buf_start_qid =
-				vport_info->chunks.chunks[i].start_queue_id;
-			vport->chunks_info.rx_buf_qtail_start =
-				vport_info->chunks.chunks[i].qtail_reg_start;
-			vport->chunks_info.rx_buf_qtail_spacing =
-				vport_info->chunks.chunks[i].qtail_reg_spacing;
-			break;
-		default:
-			PMD_INIT_LOG(ERR, "Unsupported queue type");
-			break;
-		}
-	}
-
-	return 0;
-}
-
 static int
 idpf_config_rss(struct idpf_vport *vport)
 {
@@ -276,63 +209,34 @@ idpf_init_rss(struct idpf_vport *vport)
 {
 	struct rte_eth_rss_conf *rss_conf;
 	struct rte_eth_dev_data *dev_data;
-	uint16_t i, nb_q, lut_size;
+	uint16_t i, nb_q;
 	int ret = 0;
 
 	dev_data = vport->dev_data;
 	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
 	nb_q = dev_data->nb_rx_queues;
 
-	vport->rss_key = rte_zmalloc("rss_key",
-				     vport->rss_key_size, 0);
-	if (vport->rss_key == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
-		ret = -ENOMEM;
-		goto err_alloc_key;
-	}
-
-	lut_size = vport->rss_lut_size;
-	vport->rss_lut = rte_zmalloc("rss_lut",
-				     sizeof(uint32_t) * lut_size, 0);
-	if (vport->rss_lut == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
-		ret = -ENOMEM;
-		goto err_alloc_lut;
-	}
-
 	if (rss_conf->rss_key == NULL) {
 		for (i = 0; i < vport->rss_key_size; i++)
 			vport->rss_key[i] = (uint8_t)rte_rand();
 	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
 		PMD_INIT_LOG(ERR, "Invalid RSS key length in RSS configuration, should be %d",
 			     vport->rss_key_size);
-		ret = -EINVAL;
-		goto err_cfg_key;
+		return -EINVAL;
 	} else {
 		rte_memcpy(vport->rss_key, rss_conf->rss_key,
 			   vport->rss_key_size);
 	}
 
-	for (i = 0; i < lut_size; i++)
+	for (i = 0; i < vport->rss_lut_size; i++)
 		vport->rss_lut[i] = i % nb_q;
 
 	vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED;
 
 	ret = idpf_config_rss(vport);
-	if (ret != 0) {
+	if (ret != 0)
 		PMD_INIT_LOG(ERR, "Failed to configure RSS");
-		goto err_cfg_key;
-	}
-
-	return ret;
 
-err_cfg_key:
-	rte_free(vport->rss_lut);
-	vport->rss_lut = NULL;
-err_alloc_lut:
-	rte_free(vport->rss_key);
-	vport->rss_key = NULL;
-err_alloc_key:
 	return ret;
 }
 
@@ -602,13 +506,7 @@ idpf_dev_close(struct rte_eth_dev *dev)
 
 	idpf_dev_stop(dev);
 
-	idpf_vc_destroy_vport(vport);
-
-	rte_free(vport->rss_lut);
-	vport->rss_lut = NULL;
-
-	rte_free(vport->rss_key);
-	vport->rss_key = NULL;
+	idpf_vport_deinit(vport);
 
 	rte_free(vport->recv_vectors);
 	vport->recv_vectors = NULL;
@@ -892,13 +790,6 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	vport->sw_idx = param->idx;
 	vport->devarg_id = param->devarg_id;
 
-	vport->vport_info = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
-	if (vport->vport_info == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate vport_info");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	memset(&vport_req_info, 0, sizeof(vport_req_info));
 	ret = idpf_init_vport_req_info(dev, &vport_req_info);
 	if (ret != 0) {
@@ -906,19 +797,12 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
-	ret = idpf_vc_create_vport(vport, &vport_req_info);
-	if (ret != 0) {
-		PMD_INIT_LOG(ERR, "Failed to create vport.");
-		goto err_create_vport;
-	}
-
-	ret = idpf_init_vport(vport);
+	ret = idpf_vport_init(vport, &vport_req_info, dev->data);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vports.");
-		goto err_init_vport;
+		goto err;
 	}
 
-	vport->dev_data = dev->data;
 	adapter->vports[param->idx] = vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
@@ -927,7 +811,7 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	if (dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR, "Cannot allocate mac_addr memory.");
 		ret = -ENOMEM;
-		goto err_init_vport;
+		goto err_mac_addrs;
 	}
 
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
@@ -935,11 +819,9 @@ idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 
 	return 0;
 
-err_init_vport:
+err_mac_addrs:
 	adapter->vports[param->idx] = NULL;  /* reset */
-	idpf_vc_destroy_vport(vport);
-err_create_vport:
-	rte_free(vport->vport_info);
+	idpf_vport_deinit(vport);
 err:
 	return ret;
 }
-- 
2.26.2

