From mboxrd@z Thu Jan 1 00:00:00 1970
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: david.marchand@redhat.com, anatoly.burakov@intel.com,
	vladimir.medvedkin@intel.com, ian.stokes@intel.com,
	praveen.shetty@intel.com, Bruce Richardson <bruce.richardson@intel.com>
Subject: [PATCH v6 12/25] net/intel: create common post-Tx buffer free function
Date: Fri, 24 Jan 2025 16:29:07 +0000
Message-ID: <20250124162921.1406103-13-bruce.richardson@intel.com>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20250124162921.1406103-1-bruce.richardson@intel.com>
References: <20241122125418.2857301-1-bruce.richardson@intel.com>
	<20250124162921.1406103-1-bruce.richardson@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

The actions taken for post-Tx buffer free in the SSE and AVX paths of
the i40e, iavf and ice drivers are all common, so centralize them in
the net/intel/common code.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/intel/common/tx.h                 | 71 ++++++++++++++++++
 drivers/net/intel/i40e/i40e_rxtx_vec_common.h | 72 +++----------------
 drivers/net/intel/iavf/iavf_rxtx_vec_common.h | 61 +++-------------
 drivers/net/intel/ice/ice_rxtx_vec_common.h   | 61 +++-------------
 4 files changed, 98 insertions(+), 167 deletions(-)

diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index c372d2838b..a930309c05 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -7,6 +7,7 @@

 #include <stdint.h>
 #include <rte_mbuf.h>
+#include <rte_ethdev.h>

 /* forward declaration of the common intel (ci) queue structure */
 struct ci_tx_queue;
@@ -107,4 +108,74 @@ ci_tx_backlog_entry(struct ci_tx_entry *txep, struct rte_mbuf **tx_pkts, uint16_
 		txep[i].mbuf = tx_pkts[i];
 }

+#define IETH_VPMD_TX_MAX_FREE_BUF 64
+
+typedef int (*ci_desc_done_fn)(struct ci_tx_queue *txq, uint16_t idx);
+
+static __rte_always_inline int
+ci_tx_free_bufs(struct ci_tx_queue *txq, ci_desc_done_fn desc_done)
+{
+	struct ci_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[IETH_VPMD_TX_MAX_FREE_BUF];
+
+	/* check DD bits on threshold descriptor */
+	if (!desc_done(txq, txq->tx_next_dd))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh-1)
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+
+	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
+		for (i = 0; i < n; i++) {
+			free[i] = txep[i].mbuf;
+			/* no need to reset txep[i].mbuf in vector path */
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
+		goto done;
+	}
+
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m != NULL)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							(void *)free,
+							nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+done:
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
 #endif /* _COMMON_INTEL_TX_H_ */
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_common.h b/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
index 726fa716c9..8412bb36ff 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_common.h
@@ -16,72 +16,18 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif

+static inline int
+i40e_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+	return (txq->i40e_tx_ring[idx].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
+			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE);
+}
+
 static __rte_always_inline int
 i40e_tx_free_bufs(struct ci_tx_queue *txq)
 {
-	struct ci_tx_entry *txep;
-	uint32_t n;
-	uint32_t i;
-	int nb_free = 0;
-	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
-
-	/* check DD bits on threshold descriptor */
-	if ((txq->i40e_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
-			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
-			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
-		return 0;
-
-	n = txq->tx_rs_thresh;
-
-	/* first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
-
-	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
-		for (i = 0; i < n; i++) {
-			free[i] = txep[i].mbuf;
-			/* no need to reset txep[i].mbuf in vector path */
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, n);
-		goto done;
-	}
-
-	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
-	if (likely(m != NULL)) {
-		free[0] = m;
-		nb_free = 1;
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (likely(m != NULL)) {
-				if (likely(m->pool == free[0]->pool)) {
-					free[nb_free++] = m;
-				} else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free,
-							nb_free);
-					free[0] = m;
-					nb_free = 1;
-				}
-			}
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
-	} else {
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (m != NULL)
-				rte_mempool_put(m->pool, m);
-		}
-	}
-
-done:
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
+	return ci_tx_free_bufs(txq, i40e_tx_desc_done);
 }

 static inline void
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
index e44c77c7ae..8ea3d4d010 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
@@ -16,61 +16,18 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif

+static inline int
+iavf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+	return (txq->iavf_tx_ring[idx].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) ==
+			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+}
+
 static __rte_always_inline int
 iavf_tx_free_bufs(struct ci_tx_queue *txq)
 {
-	struct ci_tx_entry *txep;
-	uint32_t n;
-	uint32_t i;
-	int nb_free = 0;
-	struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
-
-	/* check DD bits on threshold descriptor */
-	if ((txq->iavf_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
-			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
-			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
-		return 0;
-
-	n = txq->tx_rs_thresh;
-
-	/* first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
-	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
-	if (likely(m != NULL)) {
-		free[0] = m;
-		nb_free = 1;
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (likely(m != NULL)) {
-				if (likely(m->pool == free[0]->pool)) {
-					free[nb_free++] = m;
-				} else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free,
-							nb_free);
-					free[0] = m;
-					nb_free = 1;
-				}
-			}
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
-	} else {
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (m)
-				rte_mempool_put(m->pool, m);
-		}
-	}
-
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
+	return ci_tx_free_bufs(txq, iavf_tx_desc_done);
 }

 static inline void
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_common.h b/drivers/net/intel/ice/ice_rxtx_vec_common.h
index 1182f0a12d..4d00284ec2 100644
--- a/drivers/net/intel/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/intel/ice/ice_rxtx_vec_common.h
@@ -12,61 +12,18 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif

+static inline int
+ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_vec(struct ci_tx_queue *txq)
 {
-	struct ci_tx_entry *txep;
-	uint32_t n;
-	uint32_t i;
-	int nb_free = 0;
-	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
-
-	/* check DD bits on threshold descriptor */
-	if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
-			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
-			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
-		return 0;
-
-	n = txq->tx_rs_thresh;
-
-	/* first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
-	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
-	if (likely(m)) {
-		free[0] = m;
-		nb_free = 1;
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (likely(m)) {
-				if (likely(m->pool == free[0]->pool)) {
-					free[nb_free++] = m;
-				} else {
-					rte_mempool_put_bulk(free[0]->pool,
-							(void *)free,
-							nb_free);
-					free[0] = m;
-					nb_free = 1;
-				}
-			}
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
-	} else {
-		for (i = 1; i < n; i++) {
-			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (m)
-				rte_mempool_put(m->pool, m);
-		}
-	}
-
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
+	return ci_tx_free_bufs(txq, ice_tx_desc_done);
 }

 static inline void
--
2.43.0
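
For reference, the usage pattern a driver follows to hook into the new
common routine is sketched below. This is an illustrative sketch only,
not part of the patch: the "my_"-prefixed names and MY_* descriptor
constants are hypothetical stand-ins for a driver's own ring layout and
register definitions, while ci_tx_queue, ci_desc_done_fn and
ci_tx_free_bufs() are the identifiers introduced in common/tx.h above.

/* Illustrative sketch only -- hypothetical "my_" driver. */
#include "../common/tx.h"

/* Per-driver descriptor write-back check, matching the ci_desc_done_fn
 * signature: report whether the descriptor at 'idx' has been completed
 * (DD bit set) by the hardware.
 */
static inline int
my_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->my_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(MY_TXD_QW1_DTYPE_MASK)) ==
			rte_cpu_to_le_64(MY_TX_DESC_DTYPE_DESC_DONE);
}

/* The driver's vector Tx free routine then reduces to a one-line
 * wrapper around the common implementation.
 */
static __rte_always_inline int
my_tx_free_bufs(struct ci_tx_queue *txq)
{
	return ci_tx_free_bufs(txq, my_tx_desc_done);
}

The fast-free path, the per-pool bulk return and the tx_next_dd /
nb_tx_free bookkeeping all live in ci_tx_free_bufs(), so only the
descriptor-done check remains driver-specific.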