From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com, Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH] net/idpf: refine devargs parse functions
Date: Fri, 21 Apr 2023 07:14:56 +0000
Message-Id: <20230421071456.297774-1-mingxia.liu@intel.com>
X-Mailer: git-send-email 2.25.1

This patch refines the devargs parsing functions and uses the adapter's
max_vport_nb, which reflects the vport number the device actually
supports, in place of the IDPF_MAX_VPORT_NUM macro when validating the
requested vports.
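
With this change, a vport id requested via devargs (e.g. vport=[0,2-3])
is checked against the vport number the device reports, capped at
IDPF_MAX_VPORT_NUM, rather than against the macro alone. Below is a
minimal standalone sketch of that bound for reviewers; it is not driver
code, and the helper names and the IDPF_MAX_VPORT_NUM value are
assumptions for illustration only:

/*
 * Not part of the patch: a standalone sketch of the new bound.
 * Helper names are hypothetical; IDPF_MAX_VPORT_NUM below is a made-up
 * value for illustration, not the driver's real ceiling.
 */
#include <stdint.h>
#include <stdio.h>

#define IDPF_MAX_VPORT_NUM 8	/* assumed ceiling for this sketch */

/* Cap the device-reported capability at the compile-time ceiling,
 * mirroring the idpf_adapter_ext_init() change in this patch. */
static uint16_t
clamp_max_vport_nb(uint16_t caps_max_vports)
{
	return caps_max_vports > IDPF_MAX_VPORT_NUM ?
	       IDPF_MAX_VPORT_NUM : caps_max_vports;
}

/* Reject a requested vport id outside 0 ~ max_vport_nb - 1,
 * mirroring the new check in idpf_parse_devargs(). */
static int
check_req_vport(uint16_t req_vport, uint16_t max_vport_nb)
{
	if (req_vport > max_vport_nb - 1) {
		printf("Invalid vport id %d, it should be 0 ~ %d\n",
		       req_vport, max_vport_nb - 1);
		return -1;
	}
	return 0;
}

int
main(void)
{
	/* e.g. the device reports only 4 vports in caps.max_vports */
	uint16_t max_vport_nb = clamp_max_vport_nb(4);

	check_req_vport(2, max_vport_nb);	/* accepted */
	check_req_vport(6, max_vport_nb);	/* rejected: 6 > 4 - 1 */
	return 0;
}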

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c | 61 +++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 31 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..a8dd5a0a80 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
 			return 0;
 	}
 
-	if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
-		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
-		return -EINVAL;
-	}
-
 	devargs->req_vports[devargs->req_vport_nb] = id;
 	devargs->req_vport_nb++;
 
@@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
 
 	result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
 	if (result == 1) {
-		if (lo >= IDPF_MAX_VPORT_NUM)
-			return NULL;
 		if (insert_value(devargs, lo) != 0)
 			return NULL;
 	} else if (result == 2) {
-		if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
+		if (lo > hi)
 			return NULL;
 		for (i = lo; i <= hi; i++) {
 			if (insert_value(devargs, i) != 0)
@@ -969,40 +961,46 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
 		return -EINVAL;
 	}
 
+	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
+				 idpf_args);
+	if (ret != 0)
+		goto fail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+				 &adapter->base.is_tx_singleq);
+	if (ret != 0)
+		goto fail;
+
+	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+				 &adapter->base.is_rx_singleq);
+	if (ret != 0)
+		goto fail;
+
 	/* check parsed devargs */
 	if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
-	    IDPF_MAX_VPORT_NUM) {
+	    adapter->max_vport_nb) {
 		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-			     IDPF_MAX_VPORT_NUM);
+			     adapter->max_vport_nb);
 		ret = -EINVAL;
-		goto bail;
+		goto fail;
 	}
 
 	for (i = 0; i < idpf_args->req_vport_nb; i++) {
+		if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
+			PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+				     idpf_args->req_vports[i], adapter->max_vport_nb - 1);
+			ret = -EINVAL;
+			goto fail;
+		}
 		if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
-			PMD_INIT_LOG(ERR, "Vport %d has been created",
+			PMD_INIT_LOG(ERR, "Vport %d has been requested",
 				     idpf_args->req_vports[i]);
 			ret = -EINVAL;
-			goto bail;
+			goto fail;
 		}
 	}
 
-	ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
-				 idpf_args);
-	if (ret != 0)
-		goto bail;
-
-	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
-				 &adapter->base.is_tx_singleq);
-	if (ret != 0)
-		goto bail;
-
-	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
-				 &adapter->base.is_rx_singleq);
-	if (ret != 0)
-		goto bail;
-
-bail:
+fail:
 	rte_kvargs_free(kvlist);
 	return ret;
 }
@@ -1152,7 +1150,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
 	rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler,
 			  adapter);
 
-	adapter->max_vport_nb = adapter->base.caps.max_vports;
+	adapter->max_vport_nb = adapter->base.caps.max_vports > IDPF_MAX_VPORT_NUM ?
+				IDPF_MAX_VPORT_NUM : adapter->base.caps.max_vports;
 
 	adapter->vports = rte_zmalloc("vports",
 				      adapter->max_vport_nb *
-- 
2.34.1