DPDK patches and discussions
 help / color / mirror / Atom feed
From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com,
	Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v2] net/idpf: refine devargs parse functions
Date: Mon, 24 Apr 2023 10:39:43 +0000	[thread overview]
Message-ID: <20230424103943.64095-1-mingxia.liu@intel.com> (raw)
In-Reply-To: <20230421071456.297774-1-mingxia.liu@intel.com>

This patch refines the devargs parsing functions and uses the
adapter's max_vport_nb field in place of the IDPF_MAX_VPORT_NUM macro.

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 61 +++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 31 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..a8dd5a0a80 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
 			return 0;
 	}
 
-	if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
-		return -EINVAL;
-	}
-
 	devargs->req_vports[devargs->req_vport_nb] = id;
 	devargs->req_vport_nb++;
 
@@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
 
 	result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
 	if (result == 1) {
-		if (lo >= IDPF_MAX_VPORT_NUM)
-			return NULL;
 		if (insert_value(devargs, lo) != 0)
 			return NULL;
 	} else if (result == 2) {
-		if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
+		if (lo > hi)
 			return NULL;
 		for (i = lo; i <= hi; i++) {
 			if (insert_value(devargs, i) != 0)
@@ -969,40 +961,46 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
+				 idpf_args);
+	if (ret != 0)
+		goto fail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+				 &adapter->base.is_tx_singleq);
+	if (ret != 0)
+		goto fail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+				 &adapter->base.is_rx_singleq);
+	if (ret != 0)
+		goto fail;
+
 	/* check parsed devargs */
 	if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
-	    IDPF_MAX_VPORT_NUM) {
+	    adapter->max_vport_nb) {
 		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
+			     adapter->max_vport_nb);
 		ret = -EINVAL;
-		goto bail;
+		goto fail;
 	}
 
 	for (i = 0; i < idpf_args->req_vport_nb; i++) {
+		if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     idpf_args->req_vports[i], adapter->max_vport_nb - 1);
+			ret = -EINVAL;
+			goto fail;
+		}
 		if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been created",
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
 				     idpf_args->req_vports[i]);
 			ret = -EINVAL;
-			goto bail;
+			goto fail;
 		}
 	}
 
-	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
-				 idpf_args);
-	if (ret != 0)
-		goto bail;
-
-	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
-				 &adapter->base.is_tx_singleq);
-	if (ret != 0)
-		goto bail;
-
-	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
-				 &adapter->base.is_rx_singleq);
-	if (ret != 0)
-		goto bail;
-
-bail:
+fail:
 	rte_kvargs_free(kvlist);
 	return ret;
 }
@@ -1152,7 +1150,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
 
 	rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
 
-	adapter->max_vport_nb = adapter->base.caps.max_vports;
+	adapter->max_vport_nb = adapter->base.caps.max_vports > IDPF_MAX_VPORT_NUM ?
+				IDPF_MAX_VPORT_NUM : adapter->base.caps.max_vports;
 
 	adapter->vports = rte_zmalloc("vports",
 				      adapter->max_vport_nb *
-- 
2.34.1


  parent reply	other threads:[~2023-04-24  2:23 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-04-21  7:14 [PATCH] " Mingxia Liu
2023-04-23  8:55 ` Wu, Jingjing
2023-04-24  2:19   ` Liu, Mingxia
2023-04-24 10:39 ` Mingxia Liu [this message]
2023-04-24 10:43   ` [PATCH v3] " Mingxia Liu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230424103943.64095-1-mingxia.liu@intel.com \
    --to=mingxia.liu@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).