From: Reshma Pattan
To: konstantin.ananyev@intel.com, declan.doherty@intel.com, thomas.monjalon@6wind.com
Date: Tue, 5 Jan 2016 16:34:58 +0000
Message-Id: <1452011698-27354-4-git-send-email-reshma.pattan@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1452011698-27354-1-git-send-email-reshma.pattan@intel.com>
References: <1450873172-21932-1-git-send-email-reshma.pattan@intel.com> <1452011698-27354-1-git-send-email-reshma.pattan@intel.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 3/3] librte_ether: fix rte_eth_dev_configure

The user should be able to configure an ethdev with zero rx queues or zero tx queues, but not with both zero. With this change, rte_eth_dev_rx_queue_config and rte_eth_dev_tx_queue_config allocate memory for the rx/tx queues only when the number of rx/tx queues is nonzero.
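Below is a minimal usage sketch (not part of this patch) of the behaviour the change enables; the helper name configure_rx_only_port and the port_id argument are illustrative assumptions:

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative only, not part of this patch: configure an RX-only port. */
static int
configure_rx_only_port(uint8_t port_id)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));

	/* One rx queue and zero tx queues is accepted after this change;
	 * zero rx queues and zero tx queues still fails with -EINVAL.
	 */
	return rte_eth_dev_configure(port_id, 1, 0, &port_conf);
}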
Signed-off-by: Reshma Pattan
---
 lib/librte_ether/rte_ethdev.c | 36 ++++++++++++++++++++++++------------
 1 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 5849102..a7647b6 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -673,7 +673,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	void **rxq;
 	unsigned i;
 
-	if (dev->data->rx_queues == NULL) { /* first time configuration */
+	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
@@ -681,7 +681,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
 		}
-	} else { /* re-configure */
+	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
 
 		rxq = dev->data->rx_queues;
@@ -701,6 +701,13 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 
 		dev->data->rx_queues = rxq;
 
+	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+		rxq = dev->data->rx_queues;
+
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->rx_queue_release)(rxq[i]);
 	}
 	dev->data->nb_rx_queues = nb_queues;
 	return 0;
@@ -817,7 +824,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	void **txq;
 	unsigned i;
 
-	if (dev->data->tx_queues == NULL) { /* first time configuration */
+	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
@@ -825,7 +832,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			dev->data->nb_tx_queues = 0;
 			return -(ENOMEM);
 		}
-	} else { /* re-configure */
+	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
 
 		txq = dev->data->tx_queues;
@@ -845,6 +852,13 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 
 		dev->data->tx_queues = txq;
 
+	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+		txq = dev->data->tx_queues;
+
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->tx_queue_release)(txq[i]);
 	}
 	dev->data->nb_tx_queues = nb_queues;
 	return 0;
@@ -891,25 +905,23 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * configured device.
 	 */
 	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+	if (nb_rx_q == 0 && nb_tx_q == 0) {
+		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
+		return -EINVAL;
+	}
+
 	if (nb_rx_q > dev_info.max_rx_queues) {
 		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
 				port_id, nb_rx_q, dev_info.max_rx_queues);
 		return -EINVAL;
 	}
-	if (nb_rx_q == 0) {
-		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
-		return -EINVAL;
-	}
 
 	if (nb_tx_q > dev_info.max_tx_queues) {
 		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
 				port_id, nb_tx_q, dev_info.max_tx_queues);
 		return -EINVAL;
 	}
-	if (nb_tx_q == 0) {
-		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
-		return -EINVAL;
-	}
 
 	/* Copy the dev_conf parameter into the dev structure */
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
-- 
1.7.4.1