From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: ferruh.yigit@amd.com, thomas@monjalon.net, mb@smartsharesystems.com,
	Bruce Richardson <bruce.richardson@intel.com>
Subject: [PATCH v3 05/26] ethdev: use separate Rx and Tx queue limits
Date: Wed, 14 Aug 2024 11:49:11 +0100
Message-ID: <20240814104933.14062-6-bruce.richardson@intel.com>
In-Reply-To: <20240814104933.14062-1-bruce.richardson@intel.com>
References: <20240812132910.162252-1-bruce.richardson@intel.com>
 <20240814104933.14062-1-bruce.richardson@intel.com>

Update the ethdev library to use the new defines RTE_MAX_ETHPORT_RX_QUEUES
and RTE_MAX_ETHPORT_TX_QUEUES rather than the old single define
RTE_MAX_QUEUES_PER_PORT.
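
As an illustrative sketch only (the values below are examples chosen for
this note, not defaults from this series): with separate limits, a build
that needs many Rx queues but few Tx queues can size the two independently
in its build configuration, e.g. in rte_config.h:

	/* example values only - set per build requirements */
	#define RTE_MAX_ETHPORT_RX_QUEUES 1024
	#define RTE_MAX_ETHPORT_TX_QUEUES 256

reducing the memory reserved for the per-queue arrays compared with a
single RTE_MAX_QUEUES_PER_PORT sized for the larger of the two directions.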
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/ethdev/ethdev_driver.h  |  8 ++++----
 lib/ethdev/ethdev_private.c | 24 ++++++++++++++----------
 lib/ethdev/rte_ethdev.c     | 16 +++++++---------
 lib/ethdev/rte_ethdev.h     | 18 +++++++++---------
 4 files changed, 34 insertions(+), 32 deletions(-)

diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 883e59a927..51ec8e8395 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -84,12 +84,12 @@ struct __rte_cache_aligned rte_eth_dev {
 	 * User-supplied functions called from rx_burst to post-process
 	 * received packets before passing them to the user
 	 */
-	RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_ETHPORT_RX_QUEUES];
 	/**
 	 * User-supplied functions called from tx_burst to pre-process
 	 * received packets before passing them to the driver for transmission
 	 */
-	RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+	RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_ETHPORT_TX_QUEUES];
 
 	enum rte_eth_dev_state state; /**< Flag indicating the port state */
 	void *security_ctx; /**< Context for security ops */
@@ -165,9 +165,9 @@ struct __rte_cache_aligned rte_eth_dev_data {
 		flow_configured : 1;
 
 	/** Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0) */
-	uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	uint8_t rx_queue_state[RTE_MAX_ETHPORT_RX_QUEUES];
 	/** Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0) */
-	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+	uint8_t tx_queue_state[RTE_MAX_ETHPORT_TX_QUEUES];
 
 	uint32_t dev_flags; /**< Capabilities */
 	int numa_node; /**< NUMA node connection */
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 626524558a..e00530f370 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -190,7 +190,8 @@ struct dummy_queue {
 	bool rx_warn_once;
 	bool tx_warn_once;
 };
-static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+static struct dummy_queue *dummy_rxq_array[RTE_MAX_ETHPORTS][RTE_MAX_ETHPORT_RX_QUEUES];
+static struct dummy_queue *dummy_txq_array[RTE_MAX_ETHPORTS][RTE_MAX_ETHPORT_TX_QUEUES];
 static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
 RTE_INIT(dummy_queue_init)
 {
@@ -199,8 +200,10 @@ RTE_INIT(dummy_queue_init)
 	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
 		unsigned int q;
 
-		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
-			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
+		for (q = 0; q < RTE_DIM(dummy_rxq_array[port_id]); q++)
+			dummy_rxq_array[port_id][q] = &per_port_queues[port_id];
+		for (q = 0; q < RTE_DIM(dummy_txq_array[port_id]); q++)
+			dummy_txq_array[port_id][q] = &per_port_queues[port_id];
 	}
 }
 
@@ -245,7 +248,8 @@ dummy_eth_tx_burst(void *txq,
 void
 eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
 {
-	static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
+	static RTE_ATOMIC(void *) dummy_rx_data[RTE_MAX_ETHPORT_RX_QUEUES];
+	static RTE_ATOMIC(void *) dummy_tx_data[RTE_MAX_ETHPORT_TX_QUEUES];
 	uintptr_t port_id = fpo - rte_eth_fp_ops;
 
 	per_port_queues[port_id].rx_warn_once = false;
@@ -254,12 +258,12 @@ eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
 		.rx_pkt_burst = dummy_eth_rx_burst,
 		.tx_pkt_burst = dummy_eth_tx_burst,
 		.rxq = {
-			.data = (void **)&dummy_queues_array[port_id],
-			.clbk = dummy_data,
+			.data = (void **)&dummy_rxq_array[port_id],
+			.clbk = dummy_rx_data,
 		},
 		.txq = {
-			.data = (void **)&dummy_queues_array[port_id],
-			.clbk = dummy_data,
+			.data = (void **)&dummy_txq_array[port_id],
+			.clbk = dummy_tx_data,
 		},
 	};
 }
@@ -420,7 +424,7 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) *
-				RTE_MAX_QUEUES_PER_PORT,
+				RTE_MAX_ETHPORT_RX_QUEUES,
 				RTE_CACHE_LINE_SIZE);
 		if (dev->data->rx_queues == NULL) {
 			dev->data->nb_rx_queues = 0;
@@ -450,7 +454,7 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) *
-				RTE_MAX_QUEUES_PER_PORT,
+				RTE_MAX_ETHPORT_TX_QUEUES,
 				RTE_CACHE_LINE_SIZE);
 		if (dev->data->tx_queues == NULL) {
 			dev->data->nb_tx_queues = 0;
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index f1c658f49e..7999327beb 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1367,18 +1367,18 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
 	}
 
-	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
+	if (nb_rx_q > RTE_MAX_ETHPORT_RX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Number of Rx queues requested (%u) is greater than max supported(%d)",
-			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
+			nb_rx_q, RTE_MAX_ETHPORT_RX_QUEUES);
 		ret = -EINVAL;
 		goto rollback;
 	}
 
-	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
+	if (nb_tx_q > RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Number of Tx queues requested (%u) is greater than max supported(%d)",
-			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
+			nb_tx_q, RTE_MAX_ETHPORT_TX_QUEUES);
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -3811,11 +3811,9 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 		return eth_err(port_id, diag);
 	}
 
-	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
-	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
-			RTE_MAX_QUEUES_PER_PORT);
-	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
-			RTE_MAX_QUEUES_PER_PORT);
+	/* Maximum number of queues should be <= RTE_MAX_ETHPORT_RX/TX_QUEUES */
+	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, RTE_MAX_ETHPORT_RX_QUEUES);
+	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, RTE_MAX_ETHPORT_TX_QUEUES);
 
 	dev_info->driver_name = dev->device->driver->name;
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 548fada1c7..06d6a3dcd0 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -6090,7 +6090,7 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6161,7 +6161,7 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
 
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6234,7 +6234,7 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6305,7 +6305,7 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6429,7 +6429,7 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6539,7 +6539,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
-	    queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
@@ -6744,7 +6744,7 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (tx_port_id >= RTE_MAX_ETHPORTS ||
-	    tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    tx_queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid tx_port_id=%u or tx_queue_id=%u",
 			tx_port_id, tx_queue_id);
@@ -6770,7 +6770,7 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (rx_port_id >= RTE_MAX_ETHPORTS ||
-	    rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	    rx_queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR,
 			"Invalid rx_port_id=%u or rx_queue_id=%u",
 			rx_port_id, rx_queue_id);
 		return 0;
@@ -6890,7 +6890,7 @@ rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
 		goto out;
 	}
 
-	if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+	if (queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
 				    queue_id, port_id);
 		rc = -EINVAL;
-- 
2.43.0