From: Anatoly Burakov
To: dev@dpdk.org, Bruce Richardson, Ian Stokes, Vladimir Medvedkin
Subject: [PATCH v2 12/13] net/intel: add common Rx mbuf recycle
Date: Mon, 12 May 2025 11:58:37 +0100
Message-ID: <275ea79e7a1f8796a0924a122715222affa798bf.1747047506.git.anatoly.burakov@intel.com>
X-Mailer: git-send-email 2.47.1

Currently, there are duplicate implementations of Rx mbuf recycle in some
drivers, specifically ixgbe and i40e. Move them into a common header.
While we're at it, also support the no-IOVA-in-mbuf case.

Signed-off-by: Anatoly Burakov
---
 drivers/net/intel/common/recycle_mbufs.h      | 67 +++++++++++++++++++
 .../i40e/i40e_recycle_mbufs_vec_common.c      | 37 +---------
 .../ixgbe/ixgbe_recycle_mbufs_vec_common.c    | 35 +---------
 3 files changed, 73 insertions(+), 66 deletions(-)
 create mode 100644 drivers/net/intel/common/recycle_mbufs.h

diff --git a/drivers/net/intel/common/recycle_mbufs.h b/drivers/net/intel/common/recycle_mbufs.h
new file mode 100644
index 0000000000..fd31c5c1ff
--- /dev/null
+++ b/drivers/net/intel/common/recycle_mbufs.h
@@ -0,0 +1,67 @@
+#ifndef _COMMON_INTEL_RECYCLE_MBUFS_H_
+#define _COMMON_INTEL_RECYCLE_MBUFS_H_
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+#include "rx.h"
+#include "tx.h"
+
+/**
+ * Recycle mbufs for Rx queue.
+ *
+ * @param rxq Rx queue pointer
+ * @param nb_mbufs number of mbufs to recycle
+ * @param desc_len length of Rx descriptor
+ */
+static __rte_always_inline void
+ci_rx_recycle_mbufs(struct ci_rx_queue *rxq, const uint16_t nb_mbufs,
+		const size_t desc_len)
+{
+	struct ci_rx_entry *rxep;
+	volatile void *rxdp;
+	uint16_t rx_id;
+	uint16_t i;
+
+	rxdp = RTE_PTR_ADD(rxq->rx_ring, rxq->rxrearm_start * desc_len);
+	rxep = &rxq->sw_ring[rxq->rxrearm_start];
+
+	for (i = 0; i < nb_mbufs; i++) {
+		volatile uint64_t *cur = RTE_PTR_ADD(rxdp, i * desc_len);
+
+#if RTE_IOVA_IN_MBUF
+		const uint64_t paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
+		const uint64_t dma_addr = rte_cpu_to_le_64(paddr);
+#else
+		const uint64_t vaddr = (uintptr_t)rxep[i].mbuf->buf_addr +
+			RTE_PKTMBUF_HEADROOM;
+		const uint64_t dma_addr = rte_cpu_to_le_64(vaddr);
+#endif
+
+		/* 8 bytes PBA followed by 8 bytes HBA */
+		*(cur + 1) = 0;
+		*cur = dma_addr;
+	}
+
+	/* Update the descriptor initializer index */
+	rxq->rxrearm_start += nb_mbufs;
+	rx_id = rxq->rxrearm_start - 1;
+
+	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
+		rxq->rxrearm_start = 0;
+		rx_id = rxq->nb_rx_desc - 1;
+	}
+
+	rxq->rxrearm_nb -= nb_mbufs;
+
+	rte_io_wmb();
+
+	/* Update the tail pointer on the NIC */
+	rte_write32_wc_relaxed(rte_cpu_to_le_32(rx_id), rxq->qrx_tail);
+}
+
+#endif
diff --git a/drivers/net/intel/i40e/i40e_recycle_mbufs_vec_common.c b/drivers/net/intel/i40e/i40e_recycle_mbufs_vec_common.c
index aa7703216d..073357bee2 100644
--- a/drivers/net/intel/i40e/i40e_recycle_mbufs_vec_common.c
+++ b/drivers/net/intel/i40e/i40e_recycle_mbufs_vec_common.c
@@ -10,43 +10,12 @@
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
 
+#include "../common/recycle_mbufs.h"
+
 void
 i40e_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)
 {
-	struct ci_rx_queue *rxq = rx_queue;
-	struct ci_rx_entry *rxep;
-	volatile union i40e_rx_desc *rxdp;
-	uint16_t rx_id;
-	uint64_t paddr;
-	uint64_t dma_addr;
-	uint16_t i;
-
-	rxdp = I40E_RX_RING_PTR(rxq, rxq->rxrearm_start);
-	rxep = &rxq->sw_ring[rxq->rxrearm_start];
-
-	for (i = 0; i < nb_mbufs; i++) {
-		/* Initialize rxdp descs. */
-		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
-		dma_addr = rte_cpu_to_le_64(paddr);
-		/* flush desc with pa dma_addr */
-		rxdp[i].read.hdr_addr = 0;
-		rxdp[i].read.pkt_addr = dma_addr;
-	}
-
-	/* Update the descriptor initializer index */
-	rxq->rxrearm_start += nb_mbufs;
-	rx_id = rxq->rxrearm_start - 1;
-
-	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
-		rxq->rxrearm_start = 0;
-		rx_id = rxq->nb_rx_desc - 1;
-	}
-
-	rxq->rxrearm_nb -= nb_mbufs;
-
-	rte_io_wmb();
-	/* Update the tail pointer on the NIC */
-	I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
+	ci_rx_recycle_mbufs(rx_queue, nb_mbufs, sizeof(union i40e_rx_desc));
 }
 
 uint16_t
diff --git a/drivers/net/intel/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/intel/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index 1df1787c7f..e2c3523ed2 100644
--- a/drivers/net/intel/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/intel/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -8,41 +8,12 @@
 #include "ixgbe_ethdev.h"
 #include "ixgbe_rxtx.h"
 
+#include "../common/recycle_mbufs.h"
+
 void
 ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)
 {
-	struct ci_rx_queue *rxq = rx_queue;
-	struct ci_rx_entry *rxep;
-	volatile union ixgbe_adv_rx_desc *rxdp;
-	uint16_t rx_id;
-	uint64_t paddr;
-	uint64_t dma_addr;
-	uint16_t i;
-
-	rxdp = rxq->ixgbe_rx_ring + rxq->rxrearm_start;
-	rxep = &rxq->sw_ring[rxq->rxrearm_start];
-
-	for (i = 0; i < nb_mbufs; i++) {
-		/* Initialize rxdp descs. */
-		paddr = (rxep[i].mbuf)->buf_iova + RTE_PKTMBUF_HEADROOM;
-		dma_addr = rte_cpu_to_le_64(paddr);
-		/* Flush descriptors with pa dma_addr */
-		rxdp[i].read.hdr_addr = 0;
-		rxdp[i].read.pkt_addr = dma_addr;
-	}
-
-	/* Update the descriptor initializer index */
-	rxq->rxrearm_start += nb_mbufs;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= nb_mbufs;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-		(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	IXGBE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	ci_rx_recycle_mbufs(rx_queue, nb_mbufs, sizeof(union ixgbe_adv_rx_desc));
 }
 
 uint16_t
-- 
2.47.1
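
Editor's illustrative note, not part of the diff: with the helper in place, any
other Intel driver whose Rx descriptor read format uses the same 16-byte layout
(8 bytes of packet buffer address followed by 8 bytes of header buffer address)
can reduce its recycle refill callback to a one-line wrapper, exactly as the
i40e and ixgbe hunks do above. A minimal sketch follows; the "foo" driver name
and its union foo_rx_desc descriptor type are hypothetical placeholders.

    /* Hypothetical "foo" driver; union foo_rx_desc stands in for that driver's
     * Rx descriptor type, assumed to use the 8-byte PBA + 8-byte HBA read
     * layout that ci_rx_recycle_mbufs() writes.
     */
    #include "../common/recycle_mbufs.h"

    void
    foo_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs)
    {
    	/* rx_queue points to a struct ci_rx_queue, as in i40e and ixgbe */
    	ci_rx_recycle_mbufs(rx_queue, nb_mbufs, sizeof(union foo_rx_desc));
    }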