From: Bruce Richardson
To: dev@dpdk.org
Cc: Bruce Richardson, Ian Stokes, Vladimir Medvedkin, Konstantin Ananyev,
 Anatoly Burakov
Subject: [RFC PATCH 17/21] net/iavf: use common Tx queue mbuf cleanup fn
Date: Fri, 22 Nov 2024 12:54:10 +0000
Message-ID: <20241122125418.2857301-18-bruce.richardson@intel.com>
In-Reply-To: <20241122125418.2857301-1-bruce.richardson@intel.com>
References: <20241122125418.2857301-1-bruce.richardson@intel.com>

Adjust the iavf driver to also use the common mbuf freeing functions on
Tx queue release/cleanup. The implementation is complicated slightly by
the need to integrate the additional "use_ctx" parameter for the iavf
code, but the changes in the other drivers are minimal - just a constant
"false" parameter.
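
As an aside for reviewers, below is a small stand-alone sketch (commentary
only, not part of the change) of the behaviour the shared helper now
implements: free every mbuf from the slot after the last cleaned descriptor
up to the current tail, wrapping at the end of the ring, with the use_ctx
shift halving the indices on the iavf context-descriptor path, where two
descriptor slots map onto one software-ring entry. The names used here
(release_all_mbufs, dummy_mbuf, dummy_tx_entry) and the plain malloc/free
calls standing in for rte_pktmbuf_free_seg() are illustrative assumptions,
not code from any driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct dummy_mbuf { char payload[64]; };            /* stand-in for struct rte_mbuf */
struct dummy_tx_entry { struct dummy_mbuf *mbuf; }; /* stand-in for a sw_ring entry */

/* Free all mbufs between the last cleaned slot and the tail, wrapping at ring end. */
static void
release_all_mbufs(struct dummy_tx_entry *swr, uint16_t nb_desc,
		uint16_t next_dd, uint16_t rs_thresh, uint16_t tail, bool use_ctx)
{
	/* on the context-descriptor path, all indices are halved */
	uint16_t start = (uint16_t)(next_dd - rs_thresh + 1) >> use_ctx;
	uint16_t end = tail >> use_ctx;
	uint16_t i;

	nb_desc >>= use_ctx;
	i = start;
	if (end < i) {	/* the dirty region wraps past the end of the ring */
		for (; i < nb_desc; i++) {
			free(swr[i].mbuf);
			swr[i].mbuf = NULL;
		}
		i = 0;
	}
	for (; i < end; i++) {
		free(swr[i].mbuf);
		swr[i].mbuf = NULL;
	}
}

int main(void)
{
	struct dummy_tx_entry ring[8] = {0};
	int i;

	for (i = 0; i < 8; i++)
		ring[i].mbuf = malloc(sizeof(struct dummy_mbuf));
	/* start = next_dd - rs_thresh + 1 = 2, end = tail = 6: frees slots 2..5 */
	release_all_mbufs(ring, 8, 5, 4, 6, false);
	for (i = 0; i < 8; i++)
		free(ring[i].mbuf);	/* already-cleared slots are NULL; free(NULL) is a no-op */
	return 0;
}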
Signed-off-by: Bruce Richardson
---
 drivers/common/intel_eth/ieth_rxtx.h    | 19 +++++++------
 drivers/net/i40e/i40e_rxtx.c            |  6 ++--
 drivers/net/iavf/iavf_rxtx.c            | 37 ++-----------------------
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 24 ++--------------
 drivers/net/iavf/iavf_rxtx_vec_common.h | 18 ------------
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  9 ++----
 drivers/net/ice/ice_dcf_ethdev.c        |  4 +--
 drivers/net/ice/ice_rxtx.c              |  6 ++--
 drivers/net/ixgbe/ixgbe_rxtx.c          |  6 ++--
 9 files changed, 28 insertions(+), 101 deletions(-)

diff --git a/drivers/common/intel_eth/ieth_rxtx.h b/drivers/common/intel_eth/ieth_rxtx.h
index c8e5e1ad76..dad1ba4ae1 100644
--- a/drivers/common/intel_eth/ieth_rxtx.h
+++ b/drivers/common/intel_eth/ieth_rxtx.h
@@ -83,7 +83,6 @@ struct ieth_tx_queue {
 	};
 	struct { /* iavf driver specific values */
 		uint16_t ipsec_crypto_pkt_md_offset;
-		uint8_t rel_mbufs_type;
 #define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
 #define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
 		uint8_t vlan_flag;
@@ -103,23 +102,23 @@ struct ieth_tx_queue {
 	};
 };
 
-#define IETH_FREE_BUFS_LOOP(txq, swr, start) do { \
+#define IETH_FREE_BUFS_LOOP(swr, nb_desc, start, end) do { \
 		uint16_t i = start; \
-		if (txq->tx_tail < i) { \
-			for (; i < txq->nb_tx_desc; i++) { \
+		if (end < i) { \
+			for (; i < nb_desc; i++) { \
 				rte_pktmbuf_free_seg(swr[i].mbuf); \
 				swr[i].mbuf = NULL; \
 			} \
 			i = 0; \
 		} \
-		for (; i < txq->tx_tail; i++) { \
+		for (; i < end; i++) { \
 			rte_pktmbuf_free_seg(swr[i].mbuf); \
 			swr[i].mbuf = NULL; \
 		} \
 	} while(0)
 
 static inline void
-ieth_txq_release_all_mbufs(struct ieth_tx_queue *txq)
+ieth_txq_release_all_mbufs(struct ieth_tx_queue *txq, bool use_ctx)
 {
 	if (unlikely(!txq || !txq->sw_ring))
 		return;
@@ -138,14 +137,16 @@ ieth_txq_release_all_mbufs(struct ieth_tx_queue *txq)
 	 * vPMD tx will not set sw_ring's mbuf to NULL after free,
 	 * so need to free remains more carefully.
 	 */
-	const uint16_t start = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+	const uint16_t start = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> use_ctx;
+	const uint16_t nb_desc = txq->nb_tx_desc >> use_ctx;
+	const uint16_t end = txq->tx_tail >> use_ctx;
 
 	if (txq->vector_sw_ring) {
 		struct ieth_vec_tx_entry *swr = txq->sw_ring_v;
-		IETH_FREE_BUFS_LOOP(txq, swr, start);
+		IETH_FREE_BUFS_LOOP(swr, nb_desc, start, end);
 	} else {
 		struct ieth_tx_entry *swr = txq->sw_ring;
-		IETH_FREE_BUFS_LOOP(txq, swr, start);
+		IETH_FREE_BUFS_LOOP(swr, nb_desc, start, end);
 	}
 }
 
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 362a71c8b2..4878b9b8aa 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1934,7 +1934,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return err;
 	}
 
-	ieth_txq_release_all_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq, false);
 	i40e_reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -2609,7 +2609,7 @@ i40e_tx_queue_release(void *txq)
 		return;
 	}
 
-	ieth_txq_release_all_mbufs(q);
+	ieth_txq_release_all_mbufs(q, false);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -3072,7 +3072,7 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		if (!dev->data->tx_queues[i])
 			continue;
-		ieth_txq_release_all_mbufs(dev->data->tx_queues[i]);
+		ieth_txq_release_all_mbufs(dev->data->tx_queues[i], false);
 		i40e_reset_tx_queue(dev->data->tx_queues[i]);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index c0f7d12804..c574b23f34 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -387,24 +387,6 @@ release_rxq_mbufs(struct iavf_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 }
 
-static inline void
-release_txq_mbufs(struct ieth_tx_queue *txq)
-{
-	uint16_t i;
-
-	if (!txq || !txq->sw_ring) {
-		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
-		return;
-	}
-
-	for (i = 0; i < txq->nb_tx_desc; i++) {
-		if (txq->sw_ring[i].mbuf) {
-			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-			txq->sw_ring[i].mbuf = NULL;
-		}
-	}
-}
-
 static const
 struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
 	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
@@ -413,18 +395,6 @@ struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
 #endif
 };
 
-static const
-struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
-	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
-#ifdef RTE_ARCH_X86
-	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
-#ifdef CC_AVX512_SUPPORT
-	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-#endif
-#endif
-
-};
-
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -889,7 +859,6 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->q_set = true;
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-	txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
 	if (check_tx_vec_allow(txq) == false) {
 		struct iavf_adapter *ad =
@@ -1068,7 +1037,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq, txq->use_ctx);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -1097,7 +1066,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 	if (!q)
 		return;
 
-	iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
+	ieth_txq_release_all_mbufs(q, q->use_ctx);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
@@ -1114,7 +1083,7 @@ iavf_reset_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+		ieth_txq_release_all_mbufs(txq, txq->use_ctx);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 391fbfcd4d..16cfd6a5b3 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -2356,31 +2356,11 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-void __rte_cold
-iavf_tx_queue_release_mbufs_avx512(struct ieth_tx_queue *txq)
-{
-	unsigned int i;
-	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-	const uint16_t end_desc = txq->tx_tail >> txq->use_ctx; /* next empty slot */
-	const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx; /* end of SW ring */
-	struct ieth_vec_tx_entry *swr = (void *)txq->sw_ring;
-
-	if (!txq->sw_ring || txq->nb_tx_free == max_desc)
-		return;
-
-	i = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> txq->use_ctx;
-	while (i != end_desc) {
-		rte_pktmbuf_free_seg(swr[i].mbuf);
-		swr[i].mbuf = NULL;
-		if (++i == wrap_point)
-			i = 0;
-	}
-}
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct ieth_tx_queue *txq)
 {
-	txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
+	txq->vector_tx = true;
+	txq->vector_sw_ring = true;
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index ccc447e28d..20d8262e7f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -60,24 +60,6 @@ _iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
 	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
 }
 
-static inline void
-_iavf_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
-{
-	unsigned i;
-	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-
-	if (!txq->sw_ring || txq->nb_tx_free == max_desc)
-		return;
-
-	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-	while (i != txq->tx_tail) {
-		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-		txq->sw_ring[i].mbuf = NULL;
-		if (++i == txq->nb_tx_desc)
-			i = 0;
-	}
-}
-
 static inline int
 iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq)
 {
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index de632c6de8..21ad685ff1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1458,16 +1458,11 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
 	_iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-void __rte_cold
-iavf_tx_queue_release_mbufs_sse(struct ieth_tx_queue *txq)
-{
-	_iavf_tx_queue_release_mbufs_vec(txq);
-}
-
 int __rte_cold
 iavf_txq_vec_setup(struct ieth_tx_queue *txq)
 {
-	txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
+	txq->vector_tx = true;
+	txq->vector_sw_ring = false;
 	return 0;
 }
 
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 54d17875bb..959215117f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -501,7 +501,7 @@ ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	txq = dev->data->tx_queues[tx_queue_id];
-	ieth_txq_release_all_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq, false);
 	reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -651,7 +651,7 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
 		txq = dev->data->tx_queues[i];
 		if (!txq)
 			continue;
-		ieth_txq_release_all_mbufs(txq);
+		ieth_txq_release_all_mbufs(txq, false);
 		reset_tx_queue(txq);
 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 51f82738d5..5e58314b57 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1089,7 +1089,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	ieth_txq_release_all_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq, false);
 	ice_reset_tx_queue(txq);
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -1152,7 +1152,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return -EINVAL;
 	}
 
-	ieth_txq_release_all_mbufs(txq);
+	ieth_txq_release_all_mbufs(txq, false);
 	txq->qtx_tail = NULL;
 
 	return 0;
@@ -1531,7 +1531,7 @@ ice_tx_queue_release(void *txq)
 		return;
 	}
 
-	ieth_txq_release_all_mbufs(q);
+	ieth_txq_release_all_mbufs(q, false);
 	rte_free(q->sw_ring);
 	rte_memzone_free(q->mz);
 	rte_free(q);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0d5f4803e5..9299171db0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2457,7 +2457,7 @@ static void __rte_cold
 ixgbe_tx_queue_release(struct ieth_tx_queue *txq)
 {
 	if (txq != NULL && txq->ops != NULL) {
-		ieth_txq_release_all_mbufs(txq);
+		ieth_txq_release_all_mbufs(txq, false);
 		txq->ops->free_swring(txq);
 		rte_memzone_free(txq->mz);
 		rte_free(txq);
@@ -3364,7 +3364,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 		struct ieth_tx_queue *txq = dev->data->tx_queues[i];
 
 		if (txq != NULL) {
-			ieth_txq_release_all_mbufs(txq);
+			ieth_txq_release_all_mbufs(txq, false);
 			txq->ops->reset(txq);
 			dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
 		}
@@ -5638,7 +5638,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	}
 
 	if (txq->ops != NULL) {
-		ieth_txq_release_all_mbufs(txq);
+		ieth_txq_release_all_mbufs(txq, false);
 		txq->ops->reset(txq);
 	}
 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
-- 
2.43.0