DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 0/3] net/idpf: code refine
@ 2022-12-08  7:27 beilei.xing
  2022-12-08  7:27 ` [PATCH 1/3] net/idpf: remove vport req and recv info from adapter beilei.xing
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: beilei.xing @ 2022-12-08  7:27 UTC (permalink / raw)
  To: jingjing.wu, qi.z.zhang; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. Remove some unnecessary fields from the idpf_adapter structure.
2. Fix xmit free for split queue model.

Jingjing Wu (3):
  net/idpf: remove vport req and recv info from adapter
  net/idpf: remove req vports from adapter
  net/idpf: fix splitq xmit free

 drivers/net/idpf/idpf_ethdev.c | 258 +++++++++++++--------------------
 drivers/net/idpf/idpf_ethdev.h |  28 ++--
 drivers/net/idpf/idpf_rxtx.c   |  29 ++--
 drivers/net/idpf/idpf_vchnl.c  |  18 +--
 4 files changed, 139 insertions(+), 194 deletions(-)

-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 1/3] net/idpf: remove vport req and recv info from adapter
  2022-12-08  7:27 [PATCH 0/3] net/idpf: code refine beilei.xing
@ 2022-12-08  7:27 ` beilei.xing
  2022-12-08  7:27 ` [PATCH 2/3] net/idpf: remove req vports " beilei.xing
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2022-12-08  7:27 UTC (permalink / raw)
  To: jingjing.wu, qi.z.zhang; +Cc: dev, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch refines the idpf_adapter structure by removing the vport
request and receive info.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 185 +++++++++++----------------------
 drivers/net/idpf/idpf_ethdev.h |  19 ++--
 drivers/net/idpf/idpf_vchnl.c  |  18 +---
 3 files changed, 74 insertions(+), 148 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 8b347631ce..d8b7b069cf 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -134,29 +134,11 @@ idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
 }
 
 static int
-idpf_init_vport_req_info(struct rte_eth_dev *dev)
+idpf_init_vport_req_info(struct rte_eth_dev *dev,
+			 struct virtchnl2_create_vport *vport_info)
 {
 	struct idpf_vport *vport = dev->data->dev_private;
 	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_create_vport *vport_info;
-	uint16_t idx = adapter->cur_vport_idx;
-
-	if (idx == IDPF_INVALID_VPORT_IDX) {
-		PMD_INIT_LOG(ERR, "Invalid vport index.");
-		return -EINVAL;
-	}
-
-	if (adapter->vport_req_info[idx] == NULL) {
-		adapter->vport_req_info[idx] = rte_zmalloc(NULL,
-				sizeof(struct virtchnl2_create_vport), 0);
-		if (adapter->vport_req_info[idx] == NULL) {
-			PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info");
-			return -ENOMEM;
-		}
-	}
-
-	vport_info =
-		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
 
 	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
 	if (adapter->txq_model == 0) {
@@ -187,35 +169,13 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static int
-idpf_parse_devarg_id(char *name)
-{
-	uint16_t val;
-	char *p;
-
-	p = strstr(name, "vport_");
-
-	if (p == NULL)
-		return -EINVAL;
-
-	p += sizeof("vport_") - 1;
-
-	val = strtoul(p, NULL, 10);
-
-	return val;
-}
-
 #define IDPF_RSS_KEY_LEN 52
 
 static int
-idpf_init_vport(struct rte_eth_dev *dev)
+idpf_init_vport(struct idpf_vport *vport)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_adapter *adapter = vport->adapter;
-	uint16_t idx = adapter->cur_vport_idx;
-	struct virtchnl2_create_vport *vport_info =
-		(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
-	int i, type, ret;
+	struct virtchnl2_create_vport *vport_info = vport->vport_info;
+	int i, type;
 
 	vport->vport_id = vport_info->vport_id;
 	vport->txq_model = vport_info->txq_model;
@@ -231,7 +191,6 @@ idpf_init_vport(struct rte_eth_dev *dev)
 	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
 				     vport_info->rss_key_size);
 	vport->rss_lut_size = vport_info->rss_lut_size;
-	vport->sw_idx = idx;
 
 	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
 		type = vport_info->chunks.chunks[i].type;
@@ -274,17 +233,6 @@ idpf_init_vport(struct rte_eth_dev *dev)
 		}
 	}
 
-	ret = idpf_parse_devarg_id(dev->data->name);
-	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
-		return -EINVAL;
-	}
-	vport->devarg_id = ret;
-
-	vport->dev_data = dev->data;
-
-	adapter->vports[idx] = vport;
-
 	return 0;
 }
 
@@ -662,9 +610,10 @@ idpf_dev_close(struct rte_eth_dev *dev)
 	vport->qv_map = NULL;
 
 	adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
-
-	rte_free(vport);
+	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
+	adapter->vports[vport->sw_idx] = NULL;
+	rte_free(vport);
 
 	return 0;
 }
@@ -757,10 +706,7 @@ parse_vport(const char *key, const char *value, void *args)
 	}
 
 	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if ((adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) == 0) {
-			adapter->cur_vports |= RTE_BIT32(adapter->req_vports[i]);
-			adapter->cur_vport_nb++;
-		} else {
+		if (adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) {
 			PMD_INIT_LOG(ERR, "Vport %d has been created",
 				     adapter->req_vports[i]);
 			return -EINVAL;
@@ -798,6 +744,8 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 	struct rte_kvargs *kvlist;
 	int ret;
 
+	adapter->req_vport_nb = 0;
+
 	if (devargs == NULL)
 		return 0;
 
@@ -981,26 +929,6 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 
 	adapter->max_vport_nb = adapter->caps->max_vports;
 
-	adapter->vport_req_info = rte_zmalloc("vport_req_info",
-					      adapter->max_vport_nb *
-					      sizeof(*adapter->vport_req_info),
-					      0);
-	if (adapter->vport_req_info == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info memory");
-		ret = -ENOMEM;
-		goto err_caps;
-	}
-
-	adapter->vport_recv_info = rte_zmalloc("vport_recv_info",
-					       adapter->max_vport_nb *
-					       sizeof(*adapter->vport_recv_info),
-					       0);
-	if (adapter->vport_recv_info == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate vport_recv_info memory");
-		ret = -ENOMEM;
-		goto err_vport_recv_info;
-	}
-
 	adapter->vports = rte_zmalloc("vports",
 				      adapter->max_vport_nb *
 				      sizeof(*adapter->vports),
@@ -1026,11 +954,6 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 	return ret;
 
 err_vports:
-	rte_free(adapter->vport_recv_info);
-	adapter->vport_recv_info = NULL;
-err_vport_recv_info:
-	rte_free(adapter->vport_req_info);
-	adapter->vport_req_info = NULL;
 err_caps:
 	rte_free(adapter->caps);
 	adapter->caps = NULL;
@@ -1063,17 +986,17 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 };
 
 static uint16_t
-idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
+idpf_vport_idx_alloc(struct idpf_adapter *ad)
 {
 	uint16_t vport_idx;
 	uint16_t i;
 
-	for (i = 0; i < max_vport_nb; i++) {
-		if (vports[i] == NULL)
+	for (i = 0; i < ad->max_vport_nb; i++) {
+		if (ad->vports[i] == NULL)
 			break;
 	}
 
-	if (i == max_vport_nb)
+	if (i == ad->max_vport_nb)
 		vport_idx = IDPF_INVALID_VPORT_IDX;
 	else
 		vport_idx = i;
@@ -1082,35 +1005,50 @@ idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
 }
 
 static int
-idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
+idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
 	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_adapter *adapter = init_params;
+	struct idpf_vport_param *param = init_params;
+	struct idpf_adapter *adapter = param->adapter;
+	/* for sending create vport virtchnl msg prepare */
+	struct virtchnl2_create_vport vport_req_info;
 	int ret = 0;
 
 	dev->dev_ops = &idpf_eth_dev_ops;
 	vport->adapter = adapter;
+	vport->sw_idx = param->idx;
+	vport->devarg_id = param->devarg_id;
+
+	vport->vport_info = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vport->vport_info == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate vport_info");
+		ret = -ENOMEM;
+		goto err;
+	}
 
-	ret = idpf_init_vport_req_info(dev);
+	memset(&vport_req_info, 0, sizeof(vport_req_info));
+	ret = idpf_init_vport_req_info(dev, &vport_req_info);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
 		goto err;
 	}
 
-	ret = idpf_vc_create_vport(adapter);
+	ret = idpf_vc_create_vport(vport, &vport_req_info);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create vport.");
 		goto err_create_vport;
 	}
 
-	ret = idpf_init_vport(dev);
+	ret = idpf_init_vport(vport);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vports.");
 		goto err_init_vport;
 	}
 
-	adapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,
-						    adapter->max_vport_nb);
+	vport->dev_data = dev->data;
+	adapter->vports[param->idx] = vport;
+	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
+	adapter->cur_vport_nb++;
 
 	dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
 	if (dev->data->mac_addrs == NULL) {
@@ -1125,9 +1063,10 @@ idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
 	return 0;
 
 err_init_vport:
+	adapter->vports[param->idx] = NULL;  /* reset */
 	idpf_vc_destroy_vport(vport);
 err_create_vport:
-	rte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);
+	rte_free(vport->vport_info);
 err:
 	return ret;
 }
@@ -1165,7 +1104,6 @@ static void
 idpf_adapter_rel(struct idpf_adapter *adapter)
 {
 	struct idpf_hw *hw = &adapter->hw;
-	int i;
 
 	idpf_ctlq_deinit(hw);
 
@@ -1175,24 +1113,6 @@ idpf_adapter_rel(struct idpf_adapter *adapter)
 	rte_free(adapter->mbx_resp);
 	adapter->mbx_resp = NULL;
 
-	if (adapter->vport_req_info != NULL) {
-		for (i = 0; i < adapter->max_vport_nb; i++) {
-			rte_free(adapter->vport_req_info[i]);
-			adapter->vport_req_info[i] = NULL;
-		}
-		rte_free(adapter->vport_req_info);
-		adapter->vport_req_info = NULL;
-	}
-
-	if (adapter->vport_recv_info != NULL) {
-		for (i = 0; i < adapter->max_vport_nb; i++) {
-			rte_free(adapter->vport_recv_info[i]);
-			adapter->vport_recv_info[i] = NULL;
-		}
-		rte_free(adapter->vport_recv_info);
-		adapter->vport_recv_info = NULL;
-	}
-
 	rte_free(adapter->vports);
 	adapter->vports = NULL;
 }
@@ -1201,6 +1121,7 @@ static int
 idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
+	struct idpf_vport_param vport_param;
 	struct idpf_adapter *adapter;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int i, retval;
@@ -1241,28 +1162,40 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 	if (adapter->req_vport_nb == 0) {
 		/* If no vport devarg, create vport 0 by default. */
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = 0;
+		vport_param.idx = idpf_vport_idx_alloc(adapter);
+		if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			return 0;
+		}
 		snprintf(name, sizeof(name), "idpf_%s_vport_0",
 			 pci_dev->device.name);
 		retval = rte_eth_dev_create(&pci_dev->device, name,
 					    sizeof(struct idpf_vport),
-					    NULL, NULL, idpf_dev_init,
-					    adapter);
+					    NULL, NULL, idpf_dev_vport_init,
+					    &vport_param);
 		if (retval != 0)
 			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-		adapter->cur_vports |= RTE_BIT32(0);
-		adapter->cur_vport_nb++;
 	} else {
 		for (i = 0; i < adapter->req_vport_nb; i++) {
+			vport_param.adapter = adapter;
+			vport_param.devarg_id = adapter->req_vports[i];
+			vport_param.idx = idpf_vport_idx_alloc(adapter);
+			if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
+				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+				break;
+			}
 			snprintf(name, sizeof(name), "idpf_%s_vport_%d",
 				 pci_dev->device.name,
 				 adapter->req_vports[i]);
 			retval = rte_eth_dev_create(&pci_dev->device, name,
 						    sizeof(struct idpf_vport),
-						    NULL, NULL, idpf_dev_init,
-						    adapter);
+						    NULL, NULL, idpf_dev_vport_init,
+						    &vport_param);
 			if (retval != 0)
 				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    adapter->req_vports[i]);
+					    vport_param.devarg_id);
 		}
 	}
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 991677c3bc..c236cc8f16 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -89,8 +89,16 @@ struct idpf_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct idpf_vport_param {
+	struct idpf_adapter *adapter;
+	uint16_t devarg_id; /* arg id from user */
+	uint16_t idx;       /* index in adapter->vports[]*/
+};
+
 struct idpf_vport {
 	struct idpf_adapter *adapter; /* Backreference to associated adapter */
+	struct virtchnl2_create_vport *vport_info; /* virtchnl response info handling */
+	uint16_t sw_idx; /* SW index in adapter->vports[]*/
 	uint16_t vport_id;
 	uint32_t txq_model;
 	uint32_t rxq_model;
@@ -108,8 +116,6 @@ struct idpf_vport {
 	uint16_t rss_key_size;
 	uint16_t rss_lut_size;
 
-	uint16_t sw_idx; /* SW idx */
-
 	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
 	uint16_t max_pkt_len; /* Maximum packet length */
 
@@ -146,16 +152,12 @@ struct idpf_adapter {
 	uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
 	uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
 
-	/* Vport info */
-	uint8_t **vport_req_info;
-	uint8_t **vport_recv_info;
 	struct idpf_vport **vports;
 	uint16_t max_vport_nb;
 	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
-	uint16_t cur_vports;
+	uint16_t cur_vports; /* bit mask of created vport */
 	uint16_t cur_vport_nb;
-	uint16_t cur_vport_idx;
 
 	uint16_t used_vecs_num;
 
@@ -231,7 +233,8 @@ void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
 int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
-int idpf_vc_create_vport(struct idpf_adapter *adapter);
+int idpf_vc_create_vport(struct idpf_vport *vport,
+			 struct virtchnl2_create_vport *vport_info);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
 int idpf_vc_set_rss_key(struct idpf_vport *vport);
 int idpf_vc_set_rss_lut(struct idpf_vport *vport);
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index ac6486d4ef..14b34619af 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -583,11 +583,10 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
 }
 
 int
-idpf_vc_create_vport(struct idpf_adapter *adapter)
+idpf_vc_create_vport(struct idpf_vport *vport,
+		     struct virtchnl2_create_vport *vport_req_info)
 {
-	uint16_t idx = adapter->cur_vport_idx;
-	struct virtchnl2_create_vport *vport_req_info =
-		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
+	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_create_vport vport_msg;
 	struct idpf_cmd_info args;
 	int err = -1;
@@ -615,16 +614,7 @@ idpf_vc_create_vport(struct idpf_adapter *adapter)
 		return err;
 	}
 
-	if (adapter->vport_recv_info[idx] == NULL) {
-		adapter->vport_recv_info[idx] = rte_zmalloc(NULL,
-						    IDPF_DFLT_MBX_BUF_SIZE, 0);
-		if (adapter->vport_recv_info[idx] == NULL) {
-			PMD_INIT_LOG(ERR, "Failed to alloc vport_recv_info.");
-			return -ENOMEM;
-		}
-	}
-	rte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,
-		   IDPF_DFLT_MBX_BUF_SIZE);
+	rte_memcpy(vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
 	return 0;
 }
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 2/3] net/idpf: remove req vports from adapter
  2022-12-08  7:27 [PATCH 0/3] net/idpf: code refine beilei.xing
  2022-12-08  7:27 ` [PATCH 1/3] net/idpf: remove vport req and recv info from adapter beilei.xing
@ 2022-12-08  7:27 ` beilei.xing
  2022-12-08  7:27 ` [PATCH 3/3] net/idpf: fix splitq xmit free beilei.xing
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
  3 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2022-12-08  7:27 UTC (permalink / raw)
  To: jingjing.wu, qi.z.zhang; +Cc: dev, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch refines the idpf_adapter structure by removing req_vports.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 79 ++++++++++++++++++----------------
 drivers/net/idpf/idpf_ethdev.h |  9 +++-
 2 files changed, 49 insertions(+), 39 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d8b7b069cf..f7b3f8f515 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -619,29 +619,30 @@ idpf_dev_close(struct rte_eth_dev *dev)
 }
 
 static int
-insert_value(struct idpf_adapter *adapter, uint16_t id)
+insert_value(struct idpf_devargs *devargs, uint16_t id)
 {
 	uint16_t i;
 
-	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if (adapter->req_vports[i] == id)
+	/* ignore duplicate */
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] == id)
 			return 0;
 	}
 
-	if (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {
+	if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
 		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
 			     IDPF_MAX_VPORT_NUM);
 		return -EINVAL;
 	}
 
-	adapter->req_vports[adapter->req_vport_nb] = id;
-	adapter->req_vport_nb++;
+	devargs->req_vports[devargs->req_vport_nb] = id;
+	devargs->req_vport_nb++;
 
 	return 0;
 }
 
 static const char *
-parse_range(const char *value, struct idpf_adapter *adapter)
+parse_range(const char *value, struct idpf_devargs *devargs)
 {
 	uint16_t lo, hi, i;
 	int n = 0;
@@ -652,13 +653,13 @@ parse_range(const char *value, struct idpf_adapter *adapter)
 	if (result == 1) {
 		if (lo >= IDPF_MAX_VPORT_NUM)
 			return NULL;
-		if (insert_value(adapter, lo) != 0)
+		if (insert_value(devargs, lo) != 0)
 			return NULL;
 	} else if (result == 2) {
 		if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
 			return NULL;
 		for (i = lo; i <= hi; i++) {
-			if (insert_value(adapter, i) != 0)
+			if (insert_value(devargs, i) != 0)
 				return NULL;
 		}
 	} else {
@@ -671,17 +672,16 @@ parse_range(const char *value, struct idpf_adapter *adapter)
 static int
 parse_vport(const char *key, const char *value, void *args)
 {
-	struct idpf_adapter *adapter = args;
+	struct idpf_devargs *devargs = args;
 	const char *pos = value;
-	int i;
 
-	adapter->req_vport_nb = 0;
+	devargs->req_vport_nb = 0;
 
 	if (*pos == '[')
 		pos++;
 
 	while (1) {
-		pos = parse_range(pos, adapter);
+		pos = parse_range(pos, devargs);
 		if (pos == NULL) {
 			PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
 				     value, key);
@@ -698,21 +698,6 @@ parse_vport(const char *key, const char *value, void *args)
 		return -EINVAL;
 	}
 
-	if (adapter->cur_vport_nb + adapter->req_vport_nb >
-	    IDPF_MAX_VPORT_NUM) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if (adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been created",
-				     adapter->req_vports[i]);
-			return -EINVAL;
-		}
-	}
-
 	return 0;
 }
 
@@ -738,13 +723,14 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
+idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter,
+		   struct idpf_devargs *idpf_args)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct rte_kvargs *kvlist;
-	int ret;
+	int i, ret;
 
-	adapter->req_vport_nb = 0;
+	idpf_args->req_vport_nb = 0;
 
 	if (devargs == NULL)
 		return 0;
@@ -755,8 +741,26 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		return -EINVAL;
 	}
 
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
+	    IDPF_MAX_VPORT_NUM) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     IDPF_MAX_VPORT_NUM);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	for (i = 0; i < idpf_args->req_vport_nb; i++) {
+		if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been created",
+				     idpf_args->req_vports[i]);
+			ret = -EINVAL;
+			goto bail;
+		}
+	}
+
 	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
-				 adapter);
+				 idpf_args);
 	if (ret != 0)
 		goto bail;
 
@@ -1123,6 +1127,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 {
 	struct idpf_vport_param vport_param;
 	struct idpf_adapter *adapter;
+	struct idpf_devargs devargs;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int i, retval;
 	bool first_probe = false;
@@ -1154,13 +1159,13 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		rte_spinlock_unlock(&idpf_adapter_lock);
 	}
 
-	retval = idpf_parse_devargs(pci_dev, adapter);
+	retval = idpf_parse_devargs(pci_dev, adapter, &devargs);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		goto err;
 	}
 
-	if (adapter->req_vport_nb == 0) {
+	if (devargs.req_vport_nb == 0) {
 		/* If no vport devarg, create vport 0 by default. */
 		vport_param.adapter = adapter;
 		vport_param.devarg_id = 0;
@@ -1178,9 +1183,9 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		if (retval != 0)
 			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
 	} else {
-		for (i = 0; i < adapter->req_vport_nb; i++) {
+		for (i = 0; i < devargs.req_vport_nb; i++) {
 			vport_param.adapter = adapter;
-			vport_param.devarg_id = adapter->req_vports[i];
+			vport_param.devarg_id = devargs.req_vports[i];
 			vport_param.idx = idpf_vport_idx_alloc(adapter);
 			if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
 				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
@@ -1188,7 +1193,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			}
 			snprintf(name, sizeof(name), "idpf_%s_vport_%d",
 				 pci_dev->device.name,
-				 adapter->req_vports[i]);
+				 devargs.req_vports[i]);
 			retval = rte_eth_dev_create(&pci_dev->device, name,
 						    sizeof(struct idpf_vport),
 						    NULL, NULL, idpf_dev_vport_init,
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index c236cc8f16..bf37d5184c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -137,6 +137,12 @@ struct idpf_vport {
 	bool stopped;
 };
 
+/* Struct used when parse driver specific devargs */
+struct idpf_devargs {
+	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
+	uint16_t req_vport_nb;
+};
+
 struct idpf_adapter {
 	TAILQ_ENTRY(idpf_adapter) next;
 	struct idpf_hw hw;
@@ -154,8 +160,7 @@ struct idpf_adapter {
 
 	struct idpf_vport **vports;
 	uint16_t max_vport_nb;
-	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
-	uint16_t req_vport_nb;
+
 	uint16_t cur_vports; /* bit mask of created vport */
 	uint16_t cur_vport_nb;
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 3/3] net/idpf: fix splitq xmit free
  2022-12-08  7:27 [PATCH 0/3] net/idpf: code refine beilei.xing
  2022-12-08  7:27 ` [PATCH 1/3] net/idpf: remove vport req and recv info from adapter beilei.xing
  2022-12-08  7:27 ` [PATCH 2/3] net/idpf: remove req vports " beilei.xing
@ 2022-12-08  7:27 ` beilei.xing
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
  3 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2022-12-08  7:27 UTC (permalink / raw)
  To: jingjing.wu, qi.z.zhang; +Cc: dev, stable, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

When a context descriptor is used while sending packets, the mbuf
is not freed correctly, which causes the mempool to be exhausted.
This patch refines the free function.

Fixes: 770f4dfe0f79 ("net/idpf: support basic Tx data path")
Cc: stable@dpdk.org

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_rxtx.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index b4a396c3f5..5aef8ba2b6 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1508,6 +1508,7 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
 	struct idpf_tx_entry *txe;
 	struct idpf_tx_queue *txq;
 	uint16_t gen, qid, q_head;
+	uint16_t nb_desc_clean;
 	uint8_t ctype;
 
 	txd = &compl_ring[next];
@@ -1525,20 +1526,24 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
 
 	switch (ctype) {
 	case IDPF_TXD_COMPLT_RE:
-		if (q_head == 0)
-			txq->last_desc_cleaned = txq->nb_tx_desc - 1;
-		else
-			txq->last_desc_cleaned = q_head - 1;
-		if (unlikely((txq->last_desc_cleaned % 32) == 0)) {
+		/* clean to q_head which indicates be fetched txq desc id + 1.
+		 * TODO: need to refine and remove the if condition.
+		 */
+		if (unlikely(q_head % 32)) {
 			PMD_DRV_LOG(ERR, "unexpected desc (head = %u) completion.",
 						q_head);
 			return;
 		}
-
+		if (txq->last_desc_cleaned > q_head)
+			nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
+				q_head;
+		else
+			nb_desc_clean = q_head - txq->last_desc_cleaned;
+		txq->nb_free += nb_desc_clean;
+		txq->last_desc_cleaned = q_head;
 		break;
 	case IDPF_TXD_COMPLT_RS:
-		txq->nb_free++;
-		txq->nb_used--;
+		/* q_head indicates sw_id when ctype is 2 */
 		txe = &txq->sw_ring[q_head];
 		if (txe->mbuf != NULL) {
 			rte_pktmbuf_free_seg(txe->mbuf);
@@ -1693,12 +1698,16 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* fill the last descriptor with End of Packet (EOP) bit */
 		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
 
-		if (unlikely((tx_id % 32) == 0))
-			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
 		if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
 			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
 		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
 		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+
+		if (txq->nb_used >= 32) {
+			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+			/* Update txq RE bit counters */
+			txq->nb_used = 0;
+		}
 	}
 
 	/* update the tail pointer if any packets were processed */
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 0/5] net/idpf: code refine
  2022-12-08  7:27 [PATCH 0/3] net/idpf: code refine beilei.xing
                   ` (2 preceding siblings ...)
  2022-12-08  7:27 ` [PATCH 3/3] net/idpf: fix splitq xmit free beilei.xing
@ 2023-01-06  9:04 ` beilei.xing
  2023-01-06  9:04   ` [PATCH v2 1/5] net/idpf: remove vport req and recv info from adapter beilei.xing
                     ` (5 more replies)
  3 siblings, 6 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:04 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Beilei Xing

From: Beilei Xing <beilei.xing@intel.com>

1. Remove some unnecessary fields from idpf_adapter structure.
2. Fix xmit free for split queue model.
3. Fix driver init symbols.
4. Refine MTU configuration.

V2 changes:
 - fix driver init symbols
 - refine MTU setting

Jingjing Wu (5):
  net/idpf: remove vport req and recv info from adapter
  net/idpf: remove req vports from adapter
  net/idpf: fix splitq xmit free
  net/idpf: fix driver init symbols
  net/idpf: refine MTU setting

 drivers/net/idpf/idpf_ethdev.c | 301 ++++++++++++++-------------------
 drivers/net/idpf/idpf_ethdev.h |  29 ++--
 drivers/net/idpf/idpf_rxtx.c   |  29 ++--
 drivers/net/idpf/idpf_vchnl.c  |  18 +-
 4 files changed, 166 insertions(+), 211 deletions(-)

-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 1/5] net/idpf: remove vport req and recv info from adapter
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
@ 2023-01-06  9:04   ` beilei.xing
  2023-01-06  9:04   ` [PATCH v2 2/5] net/idpf: remove req vports " beilei.xing
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:04 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Jingjing Wu, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch refines the idpf_adapter structure by removing the vport
request and receive info.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 185 +++++++++++----------------------
 drivers/net/idpf/idpf_ethdev.h |  19 ++--
 drivers/net/idpf/idpf_vchnl.c  |  18 +---
 3 files changed, 74 insertions(+), 148 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 8b347631ce..d8b7b069cf 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -134,29 +134,11 @@ idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
 }
 
 static int
-idpf_init_vport_req_info(struct rte_eth_dev *dev)
+idpf_init_vport_req_info(struct rte_eth_dev *dev,
+			 struct virtchnl2_create_vport *vport_info)
 {
 	struct idpf_vport *vport = dev->data->dev_private;
 	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_create_vport *vport_info;
-	uint16_t idx = adapter->cur_vport_idx;
-
-	if (idx == IDPF_INVALID_VPORT_IDX) {
-		PMD_INIT_LOG(ERR, "Invalid vport index.");
-		return -EINVAL;
-	}
-
-	if (adapter->vport_req_info[idx] == NULL) {
-		adapter->vport_req_info[idx] = rte_zmalloc(NULL,
-				sizeof(struct virtchnl2_create_vport), 0);
-		if (adapter->vport_req_info[idx] == NULL) {
-			PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info");
-			return -ENOMEM;
-		}
-	}
-
-	vport_info =
-		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
 
 	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
 	if (adapter->txq_model == 0) {
@@ -187,35 +169,13 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static int
-idpf_parse_devarg_id(char *name)
-{
-	uint16_t val;
-	char *p;
-
-	p = strstr(name, "vport_");
-
-	if (p == NULL)
-		return -EINVAL;
-
-	p += sizeof("vport_") - 1;
-
-	val = strtoul(p, NULL, 10);
-
-	return val;
-}
-
 #define IDPF_RSS_KEY_LEN 52
 
 static int
-idpf_init_vport(struct rte_eth_dev *dev)
+idpf_init_vport(struct idpf_vport *vport)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_adapter *adapter = vport->adapter;
-	uint16_t idx = adapter->cur_vport_idx;
-	struct virtchnl2_create_vport *vport_info =
-		(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
-	int i, type, ret;
+	struct virtchnl2_create_vport *vport_info = vport->vport_info;
+	int i, type;
 
 	vport->vport_id = vport_info->vport_id;
 	vport->txq_model = vport_info->txq_model;
@@ -231,7 +191,6 @@ idpf_init_vport(struct rte_eth_dev *dev)
 	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
 				     vport_info->rss_key_size);
 	vport->rss_lut_size = vport_info->rss_lut_size;
-	vport->sw_idx = idx;
 
 	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
 		type = vport_info->chunks.chunks[i].type;
@@ -274,17 +233,6 @@ idpf_init_vport(struct rte_eth_dev *dev)
 		}
 	}
 
-	ret = idpf_parse_devarg_id(dev->data->name);
-	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
-		return -EINVAL;
-	}
-	vport->devarg_id = ret;
-
-	vport->dev_data = dev->data;
-
-	adapter->vports[idx] = vport;
-
 	return 0;
 }
 
@@ -662,9 +610,10 @@ idpf_dev_close(struct rte_eth_dev *dev)
 	vport->qv_map = NULL;
 
 	adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
-
-	rte_free(vport);
+	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
+	adapter->vports[vport->sw_idx] = NULL;
+	rte_free(vport);
 
 	return 0;
 }
@@ -757,10 +706,7 @@ parse_vport(const char *key, const char *value, void *args)
 	}
 
 	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if ((adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) == 0) {
-			adapter->cur_vports |= RTE_BIT32(adapter->req_vports[i]);
-			adapter->cur_vport_nb++;
-		} else {
+		if (adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) {
 			PMD_INIT_LOG(ERR, "Vport %d has been created",
 				     adapter->req_vports[i]);
 			return -EINVAL;
@@ -798,6 +744,8 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 	struct rte_kvargs *kvlist;
 	int ret;
 
+	adapter->req_vport_nb = 0;
+
 	if (devargs == NULL)
 		return 0;
 
@@ -981,26 +929,6 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 
 	adapter->max_vport_nb = adapter->caps->max_vports;
 
-	adapter->vport_req_info = rte_zmalloc("vport_req_info",
-					      adapter->max_vport_nb *
-					      sizeof(*adapter->vport_req_info),
-					      0);
-	if (adapter->vport_req_info == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info memory");
-		ret = -ENOMEM;
-		goto err_caps;
-	}
-
-	adapter->vport_recv_info = rte_zmalloc("vport_recv_info",
-					       adapter->max_vport_nb *
-					       sizeof(*adapter->vport_recv_info),
-					       0);
-	if (adapter->vport_recv_info == NULL) {
-		PMD_INIT_LOG(ERR, "Failed to allocate vport_recv_info memory");
-		ret = -ENOMEM;
-		goto err_vport_recv_info;
-	}
-
 	adapter->vports = rte_zmalloc("vports",
 				      adapter->max_vport_nb *
 				      sizeof(*adapter->vports),
@@ -1026,11 +954,6 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 	return ret;
 
 err_vports:
-	rte_free(adapter->vport_recv_info);
-	adapter->vport_recv_info = NULL;
-err_vport_recv_info:
-	rte_free(adapter->vport_req_info);
-	adapter->vport_req_info = NULL;
 err_caps:
 	rte_free(adapter->caps);
 	adapter->caps = NULL;
@@ -1063,17 +986,17 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 };
 
 static uint16_t
-idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
+idpf_vport_idx_alloc(struct idpf_adapter *ad)
 {
 	uint16_t vport_idx;
 	uint16_t i;
 
-	for (i = 0; i < max_vport_nb; i++) {
-		if (vports[i] == NULL)
+	for (i = 0; i < ad->max_vport_nb; i++) {
+		if (ad->vports[i] == NULL)
 			break;
 	}
 
-	if (i == max_vport_nb)
+	if (i == ad->max_vport_nb)
 		vport_idx = IDPF_INVALID_VPORT_IDX;
 	else
 		vport_idx = i;
@@ -1082,35 +1005,50 @@ idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
 }
 
 static int
-idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
+idpf_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
 	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_adapter *adapter = init_params;
+	struct idpf_vport_param *param = init_params;
+	struct idpf_adapter *adapter = param->adapter;
+	/* for sending create vport virtchnl msg prepare */
+	struct virtchnl2_create_vport vport_req_info;
 	int ret = 0;
 
 	dev->dev_ops = &idpf_eth_dev_ops;
 	vport->adapter = adapter;
+	vport->sw_idx = param->idx;
+	vport->devarg_id = param->devarg_id;
+
+	vport->vport_info = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+	if (vport->vport_info == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate vport_info");
+		ret = -ENOMEM;
+		goto err;
+	}
 
-	ret = idpf_init_vport_req_info(dev);
+	memset(&vport_req_info, 0, sizeof(vport_req_info));
+	ret = idpf_init_vport_req_info(dev, &vport_req_info);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
 		goto err;
 	}
 
-	ret = idpf_vc_create_vport(adapter);
+	ret = idpf_vc_create_vport(vport, &vport_req_info);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create vport.");
 		goto err_create_vport;
 	}
 
-	ret = idpf_init_vport(dev);
+	ret = idpf_init_vport(vport);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Failed to init vports.");
 		goto err_init_vport;
 	}
 
-	adapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,
-						    adapter->max_vport_nb);
+	vport->dev_data = dev->data;
+	adapter->vports[param->idx] = vport;
+	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
+	adapter->cur_vport_nb++;
 
 	dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
 	if (dev->data->mac_addrs == NULL) {
@@ -1125,9 +1063,10 @@ idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
 	return 0;
 
 err_init_vport:
+	adapter->vports[param->idx] = NULL;  /* reset */
 	idpf_vc_destroy_vport(vport);
 err_create_vport:
-	rte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);
+	rte_free(vport->vport_info);
 err:
 	return ret;
 }
@@ -1165,7 +1104,6 @@ static void
 idpf_adapter_rel(struct idpf_adapter *adapter)
 {
 	struct idpf_hw *hw = &adapter->hw;
-	int i;
 
 	idpf_ctlq_deinit(hw);
 
@@ -1175,24 +1113,6 @@ idpf_adapter_rel(struct idpf_adapter *adapter)
 	rte_free(adapter->mbx_resp);
 	adapter->mbx_resp = NULL;
 
-	if (adapter->vport_req_info != NULL) {
-		for (i = 0; i < adapter->max_vport_nb; i++) {
-			rte_free(adapter->vport_req_info[i]);
-			adapter->vport_req_info[i] = NULL;
-		}
-		rte_free(adapter->vport_req_info);
-		adapter->vport_req_info = NULL;
-	}
-
-	if (adapter->vport_recv_info != NULL) {
-		for (i = 0; i < adapter->max_vport_nb; i++) {
-			rte_free(adapter->vport_recv_info[i]);
-			adapter->vport_recv_info[i] = NULL;
-		}
-		rte_free(adapter->vport_recv_info);
-		adapter->vport_recv_info = NULL;
-	}
-
 	rte_free(adapter->vports);
 	adapter->vports = NULL;
 }
@@ -1201,6 +1121,7 @@ static int
 idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	       struct rte_pci_device *pci_dev)
 {
+	struct idpf_vport_param vport_param;
 	struct idpf_adapter *adapter;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int i, retval;
@@ -1241,28 +1162,40 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 	if (adapter->req_vport_nb == 0) {
 		/* If no vport devarg, create vport 0 by default. */
+		vport_param.adapter = adapter;
+		vport_param.devarg_id = 0;
+		vport_param.idx = idpf_vport_idx_alloc(adapter);
+		if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
+			PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+			return 0;
+		}
 		snprintf(name, sizeof(name), "idpf_%s_vport_0",
 			 pci_dev->device.name);
 		retval = rte_eth_dev_create(&pci_dev->device, name,
 					    sizeof(struct idpf_vport),
-					    NULL, NULL, idpf_dev_init,
-					    adapter);
+					    NULL, NULL, idpf_dev_vport_init,
+					    &vport_param);
 		if (retval != 0)
 			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
-		adapter->cur_vports |= RTE_BIT32(0);
-		adapter->cur_vport_nb++;
 	} else {
 		for (i = 0; i < adapter->req_vport_nb; i++) {
+			vport_param.adapter = adapter;
+			vport_param.devarg_id = adapter->req_vports[i];
+			vport_param.idx = idpf_vport_idx_alloc(adapter);
+			if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
+				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
+				break;
+			}
 			snprintf(name, sizeof(name), "idpf_%s_vport_%d",
 				 pci_dev->device.name,
 				 adapter->req_vports[i]);
 			retval = rte_eth_dev_create(&pci_dev->device, name,
 						    sizeof(struct idpf_vport),
-						    NULL, NULL, idpf_dev_init,
-						    adapter);
+						    NULL, NULL, idpf_dev_vport_init,
+						    &vport_param);
 			if (retval != 0)
 				PMD_DRV_LOG(ERR, "Failed to create vport %d",
-					    adapter->req_vports[i]);
+					    vport_param.devarg_id);
 		}
 	}
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 991677c3bc..c236cc8f16 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -89,8 +89,16 @@ struct idpf_chunks_info {
 	uint32_t rx_buf_qtail_spacing;
 };
 
+struct idpf_vport_param {
+	struct idpf_adapter *adapter;
+	uint16_t devarg_id; /* arg id from user */
+	uint16_t idx;       /* index in adapter->vports[]*/
+};
+
 struct idpf_vport {
 	struct idpf_adapter *adapter; /* Backreference to associated adapter */
+	struct virtchnl2_create_vport *vport_info; /* virtchnl response info handling */
+	uint16_t sw_idx; /* SW index in adapter->vports[]*/
 	uint16_t vport_id;
 	uint32_t txq_model;
 	uint32_t rxq_model;
@@ -108,8 +116,6 @@ struct idpf_vport {
 	uint16_t rss_key_size;
 	uint16_t rss_lut_size;
 
-	uint16_t sw_idx; /* SW idx */
-
 	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
 	uint16_t max_pkt_len; /* Maximum packet length */
 
@@ -146,16 +152,12 @@ struct idpf_adapter {
 	uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
 	uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
 
-	/* Vport info */
-	uint8_t **vport_req_info;
-	uint8_t **vport_recv_info;
 	struct idpf_vport **vports;
 	uint16_t max_vport_nb;
 	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
 	uint16_t req_vport_nb;
-	uint16_t cur_vports;
+	uint16_t cur_vports; /* bit mask of created vport */
 	uint16_t cur_vport_nb;
-	uint16_t cur_vport_idx;
 
 	uint16_t used_vecs_num;
 
@@ -231,7 +233,8 @@ void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
 int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
-int idpf_vc_create_vport(struct idpf_adapter *adapter);
+int idpf_vc_create_vport(struct idpf_vport *vport,
+			 struct virtchnl2_create_vport *vport_info);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
 int idpf_vc_set_rss_key(struct idpf_vport *vport);
 int idpf_vc_set_rss_lut(struct idpf_vport *vport);
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index ac6486d4ef..14b34619af 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -583,11 +583,10 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)
 }
 
 int
-idpf_vc_create_vport(struct idpf_adapter *adapter)
+idpf_vc_create_vport(struct idpf_vport *vport,
+		     struct virtchnl2_create_vport *vport_req_info)
 {
-	uint16_t idx = adapter->cur_vport_idx;
-	struct virtchnl2_create_vport *vport_req_info =
-		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
+	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_create_vport vport_msg;
 	struct idpf_cmd_info args;
 	int err = -1;
@@ -615,16 +614,7 @@ idpf_vc_create_vport(struct idpf_adapter *adapter)
 		return err;
 	}
 
-	if (adapter->vport_recv_info[idx] == NULL) {
-		adapter->vport_recv_info[idx] = rte_zmalloc(NULL,
-						    IDPF_DFLT_MBX_BUF_SIZE, 0);
-		if (adapter->vport_recv_info[idx] == NULL) {
-			PMD_INIT_LOG(ERR, "Failed to alloc vport_recv_info.");
-			return -ENOMEM;
-		}
-	}
-	rte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,
-		   IDPF_DFLT_MBX_BUF_SIZE);
+	rte_memcpy(vport->vport_info, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
 	return 0;
 }
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 2/5] net/idpf: remove req vports from adapter
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
  2023-01-06  9:04   ` [PATCH v2 1/5] net/idpf: remove vport req and recv info from adapter beilei.xing
@ 2023-01-06  9:04   ` beilei.xing
  2023-01-06  9:04   ` [PATCH v2 3/5] net/idpf: fix splitq xmit free beilei.xing
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:04 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Jingjing Wu, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch refines the idpf_adapter structure by removing the req_vports field.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 79 ++++++++++++++++++----------------
 drivers/net/idpf/idpf_ethdev.h |  9 +++-
 2 files changed, 49 insertions(+), 39 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d8b7b069cf..f7b3f8f515 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -619,29 +619,30 @@ idpf_dev_close(struct rte_eth_dev *dev)
 }
 
 static int
-insert_value(struct idpf_adapter *adapter, uint16_t id)
+insert_value(struct idpf_devargs *devargs, uint16_t id)
 {
 	uint16_t i;
 
-	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if (adapter->req_vports[i] == id)
+	/* ignore duplicate */
+	for (i = 0; i < devargs->req_vport_nb; i++) {
+		if (devargs->req_vports[i] == id)
 			return 0;
 	}
 
-	if (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {
+	if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
 		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
 			     IDPF_MAX_VPORT_NUM);
 		return -EINVAL;
 	}
 
-	adapter->req_vports[adapter->req_vport_nb] = id;
-	adapter->req_vport_nb++;
+	devargs->req_vports[devargs->req_vport_nb] = id;
+	devargs->req_vport_nb++;
 
 	return 0;
 }
 
 static const char *
-parse_range(const char *value, struct idpf_adapter *adapter)
+parse_range(const char *value, struct idpf_devargs *devargs)
 {
 	uint16_t lo, hi, i;
 	int n = 0;
@@ -652,13 +653,13 @@ parse_range(const char *value, struct idpf_adapter *adapter)
 	if (result == 1) {
 		if (lo >= IDPF_MAX_VPORT_NUM)
 			return NULL;
-		if (insert_value(adapter, lo) != 0)
+		if (insert_value(devargs, lo) != 0)
 			return NULL;
 	} else if (result == 2) {
 		if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
 			return NULL;
 		for (i = lo; i <= hi; i++) {
-			if (insert_value(adapter, i) != 0)
+			if (insert_value(devargs, i) != 0)
 				return NULL;
 		}
 	} else {
@@ -671,17 +672,16 @@ parse_range(const char *value, struct idpf_adapter *adapter)
 static int
 parse_vport(const char *key, const char *value, void *args)
 {
-	struct idpf_adapter *adapter = args;
+	struct idpf_devargs *devargs = args;
 	const char *pos = value;
-	int i;
 
-	adapter->req_vport_nb = 0;
+	devargs->req_vport_nb = 0;
 
 	if (*pos == '[')
 		pos++;
 
 	while (1) {
-		pos = parse_range(pos, adapter);
+		pos = parse_range(pos, devargs);
 		if (pos == NULL) {
 			PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
 				     value, key);
@@ -698,21 +698,6 @@ parse_vport(const char *key, const char *value, void *args)
 		return -EINVAL;
 	}
 
-	if (adapter->cur_vport_nb + adapter->req_vport_nb >
-	    IDPF_MAX_VPORT_NUM) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < adapter->req_vport_nb; i++) {
-		if (adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been created",
-				     adapter->req_vports[i]);
-			return -EINVAL;
-		}
-	}
-
 	return 0;
 }
 
@@ -738,13 +723,14 @@ parse_bool(const char *key, const char *value, void *args)
 }
 
 static int
-idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
+idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter,
+		   struct idpf_devargs *idpf_args)
 {
 	struct rte_devargs *devargs = pci_dev->device.devargs;
 	struct rte_kvargs *kvlist;
-	int ret;
+	int i, ret;
 
-	adapter->req_vport_nb = 0;
+	idpf_args->req_vport_nb = 0;
 
 	if (devargs == NULL)
 		return 0;
@@ -755,8 +741,26 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 		return -EINVAL;
 	}
 
+	/* check parsed devargs */
+	if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
+	    IDPF_MAX_VPORT_NUM) {
+		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+			     IDPF_MAX_VPORT_NUM);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	for (i = 0; i < idpf_args->req_vport_nb; i++) {
+		if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
+			PMD_INIT_LOG(ERR, "Vport %d has been created",
+				     idpf_args->req_vports[i]);
+			ret = -EINVAL;
+			goto bail;
+		}
+	}
+
 	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
-				 adapter);
+				 idpf_args);
 	if (ret != 0)
 		goto bail;
 
@@ -1123,6 +1127,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 {
 	struct idpf_vport_param vport_param;
 	struct idpf_adapter *adapter;
+	struct idpf_devargs devargs;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int i, retval;
 	bool first_probe = false;
@@ -1154,13 +1159,13 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		rte_spinlock_unlock(&idpf_adapter_lock);
 	}
 
-	retval = idpf_parse_devargs(pci_dev, adapter);
+	retval = idpf_parse_devargs(pci_dev, adapter, &devargs);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to parse private devargs");
 		goto err;
 	}
 
-	if (adapter->req_vport_nb == 0) {
+	if (devargs.req_vport_nb == 0) {
 		/* If no vport devarg, create vport 0 by default. */
 		vport_param.adapter = adapter;
 		vport_param.devarg_id = 0;
@@ -1178,9 +1183,9 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		if (retval != 0)
 			PMD_DRV_LOG(ERR, "Failed to create default vport 0");
 	} else {
-		for (i = 0; i < adapter->req_vport_nb; i++) {
+		for (i = 0; i < devargs.req_vport_nb; i++) {
 			vport_param.adapter = adapter;
-			vport_param.devarg_id = adapter->req_vports[i];
+			vport_param.devarg_id = devargs.req_vports[i];
 			vport_param.idx = idpf_vport_idx_alloc(adapter);
 			if (vport_param.idx == IDPF_INVALID_VPORT_IDX) {
 				PMD_INIT_LOG(ERR, "No space for vport %u", vport_param.devarg_id);
@@ -1188,7 +1193,7 @@ idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			}
 			snprintf(name, sizeof(name), "idpf_%s_vport_%d",
 				 pci_dev->device.name,
-				 adapter->req_vports[i]);
+				 devargs.req_vports[i]);
 			retval = rte_eth_dev_create(&pci_dev->device, name,
 						    sizeof(struct idpf_vport),
 						    NULL, NULL, idpf_dev_vport_init,
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index c236cc8f16..bf37d5184c 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -137,6 +137,12 @@ struct idpf_vport {
 	bool stopped;
 };
 
+/* Struct used when parse driver specific devargs */
+struct idpf_devargs {
+	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
+	uint16_t req_vport_nb;
+};
+
 struct idpf_adapter {
 	TAILQ_ENTRY(idpf_adapter) next;
 	struct idpf_hw hw;
@@ -154,8 +160,7 @@ struct idpf_adapter {
 
 	struct idpf_vport **vports;
 	uint16_t max_vport_nb;
-	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
-	uint16_t req_vport_nb;
+
 	uint16_t cur_vports; /* bit mask of created vport */
 	uint16_t cur_vport_nb;
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 3/5] net/idpf: fix splitq xmit free
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
  2023-01-06  9:04   ` [PATCH v2 1/5] net/idpf: remove vport req and recv info from adapter beilei.xing
  2023-01-06  9:04   ` [PATCH v2 2/5] net/idpf: remove req vports " beilei.xing
@ 2023-01-06  9:04   ` beilei.xing
  2023-01-06  9:05   ` [PATCH v2 4/5] net/idpf: fix driver init symbols beilei.xing
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:04 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Jingjing Wu, stable, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

When a context descriptor is used during packet transmission,
the mbuf is not freed correctly, which causes the mempool to
be exhausted. This patch refines the free function.

Fixes: 770f4dfe0f79 ("net/idpf: support basic Tx data path")
Cc: stable@dpdk.org

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_rxtx.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index b4a396c3f5..5aef8ba2b6 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1508,6 +1508,7 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
 	struct idpf_tx_entry *txe;
 	struct idpf_tx_queue *txq;
 	uint16_t gen, qid, q_head;
+	uint16_t nb_desc_clean;
 	uint8_t ctype;
 
 	txd = &compl_ring[next];
@@ -1525,20 +1526,24 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
 
 	switch (ctype) {
 	case IDPF_TXD_COMPLT_RE:
-		if (q_head == 0)
-			txq->last_desc_cleaned = txq->nb_tx_desc - 1;
-		else
-			txq->last_desc_cleaned = q_head - 1;
-		if (unlikely((txq->last_desc_cleaned % 32) == 0)) {
+		/* clean to q_head which indicates be fetched txq desc id + 1.
+		 * TODO: need to refine and remove the if condition.
+		 */
+		if (unlikely(q_head % 32)) {
 			PMD_DRV_LOG(ERR, "unexpected desc (head = %u) completion.",
 						q_head);
 			return;
 		}
-
+		if (txq->last_desc_cleaned > q_head)
+			nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
+				q_head;
+		else
+			nb_desc_clean = q_head - txq->last_desc_cleaned;
+		txq->nb_free += nb_desc_clean;
+		txq->last_desc_cleaned = q_head;
 		break;
 	case IDPF_TXD_COMPLT_RS:
-		txq->nb_free++;
-		txq->nb_used--;
+		/* q_head indicates sw_id when ctype is 2 */
 		txe = &txq->sw_ring[q_head];
 		if (txe->mbuf != NULL) {
 			rte_pktmbuf_free_seg(txe->mbuf);
@@ -1693,12 +1698,16 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* fill the last descriptor with End of Packet (EOP) bit */
 		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
 
-		if (unlikely((tx_id % 32) == 0))
-			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
 		if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
 			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
 		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
 		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+
+		if (txq->nb_used >= 32) {
+			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+			/* Update txq RE bit counters */
+			txq->nb_used = 0;
+		}
 	}
 
 	/* update the tail pointer if any packets were processed */
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 4/5] net/idpf: fix driver init symbols
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
                     ` (2 preceding siblings ...)
  2023-01-06  9:04   ` [PATCH v2 3/5] net/idpf: fix splitq xmit free beilei.xing
@ 2023-01-06  9:05   ` beilei.xing
  2023-01-06  9:05   ` [PATCH v2 5/5] net/idpf: refine MTU setting beilei.xing
  2023-01-16  7:59   ` [PATCH v2 0/5] net/idpf: code refine Zhang, Qi Z
  5 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:05 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Jingjing Wu, stable, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch fixes idpf driver init symbols.

Fixes: 549343c25db8 ("net/idpf: support device initialization")
Cc: stable@dpdk.org

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index f7b3f8f515..89af27ca34 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -1251,7 +1251,11 @@ static struct rte_pci_driver rte_idpf_pmd = {
  */
 RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_KMOD_DEP(net_idpf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_idpf,
+			      IDPF_TX_SINGLE_Q "=<0|1> "
+			      IDPF_RX_SINGLE_Q "=<0|1> "
+			      IDPF_VPORT "=[vport_set0,[vport_set1],...]");
 
 RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 5/5] net/idpf: refine MTU setting
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
                     ` (3 preceding siblings ...)
  2023-01-06  9:05   ` [PATCH v2 4/5] net/idpf: fix driver init symbols beilei.xing
@ 2023-01-06  9:05   ` beilei.xing
  2023-01-16  7:59   ` [PATCH v2 0/5] net/idpf: code refine Zhang, Qi Z
  5 siblings, 0 replies; 11+ messages in thread
From: beilei.xing @ 2023-01-06  9:05 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, Jingjing Wu, Beilei Xing

From: Jingjing Wu <jingjing.wu@intel.com>

This patch refines MTU configuration.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 37 +++++++++++++++++++---------------
 drivers/net/idpf/idpf_ethdev.h |  1 +
 2 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 89af27ca34..3f1b77144c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -56,9 +56,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = adapter->caps->max_rx_q;
 	dev_info->max_tx_queues = adapter->caps->max_tx_q;
 	dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
-	dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
+	dev_info->max_rx_pktlen = vport->max_mtu + IDPF_ETH_OVERHEAD;
 
-	dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
+	dev_info->max_mtu = vport->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
 	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
@@ -104,14 +104,23 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 }
 
 static int
-idpf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
+idpf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
+	struct idpf_vport *vport = dev->data->dev_private;
+
 	/* mtu setting is forbidden if port is start */
 	if (dev->data->dev_started) {
 		PMD_DRV_LOG(ERR, "port must be stopped before configuration");
 		return -EBUSY;
 	}
 
+	if (mtu > vport->max_mtu) {
+		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+		return -EINVAL;
+	}
+
+	vport->max_pkt_len = mtu + IDPF_ETH_OVERHEAD;
+
 	return 0;
 }
 
@@ -381,6 +390,10 @@ idpf_dev_configure(struct rte_eth_dev *dev)
 		return -1;
 	}
 
+	vport->max_pkt_len =
+		(dev->data->mtu == 0) ? IDPF_DEFAULT_MTU : dev->data->mtu +
+		IDPF_ETH_OVERHEAD;
+
 	return 0;
 }
 
@@ -513,39 +526,31 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
 	vport->stopped = 0;
 
-	if (dev->data->mtu > vport->max_mtu) {
-		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-		ret = -EINVAL;
-		goto err_mtu;
-	}
-
-	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
-
 	req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
 	if (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {
 		PMD_DRV_LOG(ERR, "The accumulated request vectors' number should be less than %d",
 			    num_allocated_vectors);
 		ret = -EINVAL;
-		goto err_mtu;
+		goto err_vec;
 	}
 
 	ret = idpf_vc_alloc_vectors(vport, req_vecs_num);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
-		goto err_mtu;
+		goto err_vec;
 	}
 	adapter->used_vecs_num += req_vecs_num;
 
 	ret = idpf_config_rx_queues_irqs(dev);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to configure irqs");
-		goto err_mtu;
+		goto err_vec;
 	}
 
 	ret = idpf_start_queues(dev);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
-		goto err_mtu;
+		goto err_vec;
 	}
 
 	idpf_set_rx_function(dev);
@@ -561,7 +566,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
 err_vport:
 	idpf_stop_queues(dev);
-err_mtu:
+err_vec:
 	return ret;
 }
 
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index bf37d5184c..b0746e5041 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -39,6 +39,7 @@
 #define IDPF_MIN_BUF_SIZE	1024
 #define IDPF_MAX_FRAME_SIZE	9728
 #define IDPF_MIN_FRAME_SIZE	14
+#define IDPF_DEFAULT_MTU	RTE_ETHER_MTU
 
 #define IDPF_NUM_MACADDR_MAX	64
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH v2 0/5] net/idpf: code refine
  2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
                     ` (4 preceding siblings ...)
  2023-01-06  9:05   ` [PATCH v2 5/5] net/idpf: refine MTU setting beilei.xing
@ 2023-01-16  7:59   ` Zhang, Qi Z
  5 siblings, 0 replies; 11+ messages in thread
From: Zhang, Qi Z @ 2023-01-16  7:59 UTC (permalink / raw)
  To: Xing, Beilei; +Cc: dev



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, January 6, 2023 5:05 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v2 0/5] net/idpf: code refine
> 
> From: Beilei Xing <beilei.xing@intel.com>
> 
> 1. Remove some unnecessary fields from idpf_adapter structure.
> 2. Fix xmit free for split queue model.
> 3. Fix driver init symbols.
> 4. Refine MTU configuration.
> 
> V2 changes:
>  - fix driver init symbols
>  - refine MTU setting
> 
> Jingjing Wu (5):
>   net/idpf: remove vport req and recv info from adapter
>   net/idpf: remove req vports from adapter
>   net/idpf: fix splitq xmit free
>   net/idpf: fix driver init symbols
>   net/idpf: refine MTU setting
> 
>  drivers/net/idpf/idpf_ethdev.c | 301 ++++++++++++++-------------------
> drivers/net/idpf/idpf_ethdev.h |  29 ++--
>  drivers/net/idpf/idpf_rxtx.c   |  29 ++--
>  drivers/net/idpf/idpf_vchnl.c  |  18 +-
>  4 files changed, 166 insertions(+), 211 deletions(-)
> 
> --
> 2.26.2

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2023-01-16  7:59 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-12-08  7:27 [PATCH 0/3] net/idpf: code refine beilei.xing
2022-12-08  7:27 ` [PATCH 1/3] net/idpf: remove vport req and recv info from adapter beilei.xing
2022-12-08  7:27 ` [PATCH 2/3] net/idpf: remove req vports " beilei.xing
2022-12-08  7:27 ` [PATCH 3/3] net/idpf: fix splitq xmit free beilei.xing
2023-01-06  9:04 ` [PATCH v2 0/5] net/idpf: code refine beilei.xing
2023-01-06  9:04   ` [PATCH v2 1/5] net/idpf: remove vport req and recv info from adapter beilei.xing
2023-01-06  9:04   ` [PATCH v2 2/5] net/idpf: remove req vports " beilei.xing
2023-01-06  9:04   ` [PATCH v2 3/5] net/idpf: fix splitq xmit free beilei.xing
2023-01-06  9:05   ` [PATCH v2 4/5] net/idpf: fix driver init symbols beilei.xing
2023-01-06  9:05   ` [PATCH v2 5/5] net/idpf: refine MTU setting beilei.xing
2023-01-16  7:59   ` [PATCH v2 0/5] net/idpf: code refine Zhang, Qi Z

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).