From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson, Ian Stokes, Vladimir Medvedkin, Konstantin Ananyev,
	Anatoly Burakov, Wathsala Vithanage
Subject: [PATCH v3 04/22] drivers/net: align Tx queue struct field names
Date: Wed, 11 Dec 2024 17:33:10 +0000
Message-ID: <20241211173331.65262-5-bruce.richardson@intel.com>
In-Reply-To: <20241211173331.65262-1-bruce.richardson@intel.com>
References: <20241122125418.2857301-1-bruce.richardson@intel.com>
	<20241211173331.65262-1-bruce.richardson@intel.com>

Across the various Intel drivers, fields in the Tx queue structure that
serve the same function are sometimes given different names. Rename
these fields consistently so that the structures align better for
future merging.
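For reference, the renames in this patch converge on the following
common field set (a condensed sketch for review purposes only;
"example_tx_queue" is illustrative and not a struct added by this
patch -- the real definitions are in the diff below):

	struct example_tx_queue {
		volatile void *tx_ring;     /* Tx ring virtual address */
		rte_iova_t tx_ring_dma;     /* was tx_ring_phys_addr (i40e, iavf, ixgbe) */
		volatile uint8_t *qtx_tail; /* was tdt_reg_addr (ixgbe) */
		uint16_t nb_tx_desc;        /* ring length */
		uint16_t tx_tail;           /* current tail register value */
		uint16_t nb_tx_used;        /* was nb_used (iavf) */
		uint16_t nb_tx_free;        /* was nb_free (iavf) */
		uint16_t tx_free_thresh;    /* was free_thresh (iavf) */
		uint16_t tx_rs_thresh;      /* was rs_thresh (iavf) */
		uint16_t tx_next_dd;        /* was next_dd (iavf) */
		uint16_t tx_next_rs;        /* was next_rs (iavf) */
	};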
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/i40e/i40e_rxtx.c            |  6 +--
 drivers/net/i40e/i40e_rxtx.h            |  2 +-
 drivers/net/iavf/iavf_rxtx.c            | 60 ++++++++++++-------------
 drivers/net/iavf/iavf_rxtx.h            | 14 +++---
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   | 19 ++++----
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 57 +++++++++++------------
 drivers/net/iavf/iavf_rxtx_vec_common.h | 24 +++++-----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 18 ++++----
 drivers/net/iavf/iavf_vchnl.c           |  2 +-
 drivers/net/ixgbe/base/ixgbe_osdep.h    |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c          | 16 +++----
 drivers/net/ixgbe/ixgbe_rxtx.h          |  6 +--
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c  |  2 +-
 14 files changed, 116 insertions(+), 114 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2e1f07d2a1..b0bb20fe9a 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2549,7 +2549,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->vsi = vsi;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-	txq->tx_ring_phys_addr = tz->iova;
+	txq->tx_ring_dma = tz->iova;
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
 	/* Allocate software ring */
@@ -2923,7 +2923,7 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 	/* clear the context structure first */
 	memset(&tx_ctx, 0, sizeof(tx_ctx));
 	tx_ctx.new_context = 1;
-	tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+	tx_ctx.base = txq->tx_ring_dma / I40E_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
 
 #ifdef RTE_LIBRTE_IEEE1588
@@ -3209,7 +3209,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
 	txq->vsi = pf->fdir.fdir_vsi;
 
-	txq->tx_ring_phys_addr = tz->iova;
+	txq->tx_ring_dma = tz->iova;
 	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
 	/*
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 0f5d3cb0b7..f420c98687 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -129,7 +129,7 @@ struct i40e_rx_queue {
  */
 struct i40e_tx_queue {
 	uint16_t nb_tx_desc; /**< number of TX descriptors */
-	uint64_t tx_ring_phys_addr; /**< TX ring DMA address */
+	rte_iova_t tx_ring_dma; /**< TX ring DMA address */
 	volatile struct i40e_tx_desc *tx_ring; /**< TX ring virtual address */
 	struct ci_tx_entry *sw_ring; /**< virtual address of SW ring */
 	uint16_t tx_tail; /**< current value of tail register */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e337f20073..adaaeb4625 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -216,8 +216,8 @@ static inline bool
 check_tx_vec_allow(struct iavf_tx_queue *txq)
 {
 	if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
-	    txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
-	    txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
+	    txq->tx_rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
+	    txq->tx_rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
 		PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
 		return true;
 	}
@@ -309,13 +309,13 @@ reset_tx_queue(struct iavf_tx_queue *txq)
 	}
 
 	txq->tx_tail = 0;
-	txq->nb_used = 0;
+	txq->nb_tx_used = 0;
 
 	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
-	txq->nb_free = txq->nb_tx_desc - 1;
+	txq->nb_tx_free = txq->nb_tx_desc - 1;
 
-	txq->next_dd = txq->rs_thresh - 1;
-	txq->next_rs = txq->rs_thresh - 1;
+	txq->tx_next_dd = txq->tx_rs_thresh - 1;
+	txq->tx_next_rs = txq->tx_rs_thresh - 1;
 }
 
 static int
@@ -845,8 +845,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	txq->nb_tx_desc = nb_desc;
-	txq->rs_thresh = tx_rs_thresh;
-	txq->free_thresh = tx_free_thresh;
+	txq->tx_rs_thresh = tx_rs_thresh;
+	txq->tx_free_thresh = tx_free_thresh;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
 	txq->offloads = offloads;
@@ -881,7 +881,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		rte_free(txq);
 		return -ENOMEM;
 	}
-	txq->tx_ring_phys_addr = mz->iova;
+	txq->tx_ring_dma = mz->iova;
 	txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
 
 	txq->mz = mz;
@@ -2387,7 +2387,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 
 	volatile struct iavf_tx_desc *txd = txq->tx_ring;
 
-	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
+	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
 	if (desc_to_clean_to >= nb_tx_desc)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
@@ -2411,7 +2411,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
-	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 
 	return 0;
 }
@@ -2807,7 +2807,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	/* Check if the descriptor ring needs to be cleaned. */
-	if (txq->nb_free < txq->free_thresh)
+	if (txq->nb_tx_free < txq->tx_free_thresh)
 		iavf_xmit_cleanup(txq);
 
 	desc_idx = txq->tx_tail;
 
@@ -2862,14 +2862,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			   "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
 			   txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_desc_required > txq->nb_free) {
+		if (nb_desc_required > txq->nb_tx_free) {
 			if (iavf_xmit_cleanup(txq)) {
 				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_desc_required > txq->rs_thresh)) {
-				while (nb_desc_required > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->tx_rs_thresh)) {
+				while (nb_desc_required > txq->nb_tx_free) {
 					if (iavf_xmit_cleanup(txq)) {
 						if (idx == 0)
 							return 0;
@@ -2991,10 +2991,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/* The last packet data descriptor needs End Of Packet (EOP) */
 		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
 
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
+		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_desc_required);
+		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_desc_required);
 
-		if (txq->nb_used >= txq->rs_thresh) {
+		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
 				   desc_idx_last, txq->port_id, txq->queue_id);
@@ -3002,7 +3002,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
-			txq->nb_used = 0;
+			txq->nb_tx_used = 0;
 		}
 
 		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
@@ -4278,11 +4278,11 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
 	tx_id = txq->tx_tail;
 	tx_last = tx_id;
 
-	if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
+	if (txq->nb_tx_free == 0 && iavf_xmit_cleanup(txq))
 		return 0;
 
-	nb_tx_to_clean = txq->nb_free;
-	nb_tx_free_last = txq->nb_free;
+	nb_tx_to_clean = txq->nb_tx_free;
+	nb_tx_free_last = txq->nb_tx_free;
 	if (!free_cnt)
 		free_cnt = txq->nb_tx_desc;
 
@@ -4305,16 +4305,16 @@ iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
 			tx_id = swr_ring[tx_id].next_id;
 		} while (--nb_tx_to_clean && pkt_cnt < free_cnt && tx_id != tx_last);
 
-		if (txq->rs_thresh > txq->nb_tx_desc -
-		    txq->nb_free || tx_id == tx_last)
+		if (txq->tx_rs_thresh > txq->nb_tx_desc -
+		    txq->nb_tx_free || tx_id == tx_last)
 			break;
 
 		if (pkt_cnt < free_cnt) {
 			if (iavf_xmit_cleanup(txq))
 				break;
 
-			nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
-			nb_tx_free_last = txq->nb_free;
+			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_tx_free;
 		}
 	}
 
@@ -4356,8 +4356,8 @@ iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->nb_desc = txq->nb_tx_desc;
 
-	qinfo->conf.tx_free_thresh = txq->free_thresh;
-	qinfo->conf.tx_rs_thresh = txq->rs_thresh;
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
@@ -4432,8 +4432,8 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 
 	desc = txq->tx_tail + offset;
 	/* go to next desc that has the RS bit */
-	desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
-		txq->rs_thresh;
+	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+		txq->tx_rs_thresh;
 	if (desc >= txq->nb_tx_desc) {
 		desc -= txq->nb_tx_desc;
 		if (desc >= txq->nb_tx_desc)
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1a191f2c89..44e2de731c 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -277,25 +277,25 @@ struct iavf_rx_queue {
 struct iavf_tx_queue {
 	const struct rte_memzone *mz;  /* memzone for Tx ring */
 	volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
-	uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
+	rte_iova_t tx_ring_dma;        /* Tx ring DMA address */
 	struct ci_tx_entry *sw_ring;   /* address array of SW ring */
 	uint16_t nb_tx_desc;           /* ring length */
 	uint16_t tx_tail;              /* current value of tail */
 	volatile uint8_t *qtx_tail;    /* register address of tail */
 	/* number of used desc since RS bit set */
-	uint16_t nb_used;
-	uint16_t nb_free;
+	uint16_t nb_tx_used;
+	uint16_t nb_tx_free;
 	uint16_t last_desc_cleaned;    /* last desc have been cleaned*/
-	uint16_t free_thresh;
-	uint16_t rs_thresh;
+	uint16_t tx_free_thresh;
+	uint16_t tx_rs_thresh;
 	uint8_t rel_mbufs_type;
 	struct iavf_vsi *vsi;          /**< the VSI this queue belongs to */
 
 	uint16_t port_id;
 	uint16_t queue_id;
 	uint64_t offloads;
-	uint16_t next_dd;              /* next to set RS, for VPMD */
-	uint16_t next_rs;              /* next to check DD, for VPMD */
+	uint16_t tx_next_dd;           /* next to set RS, for VPMD */
+	uint16_t tx_next_rs;           /* next to check DD, for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
 	uint64_t mbuf_errors;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 28885800e0..42e09a2adf 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -1742,18 +1742,19 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
-	if (txq->nb_free < txq->free_thresh)
+	if (txq->nb_tx_free < txq->tx_free_thresh)
 		iavf_tx_free_bufs(txq);
 
-	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
 		return 0;
+	nb_commit = nb_pkts;
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
 	txep = &txq->sw_ring[tx_id];
 
-	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
@@ -1768,7 +1769,7 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 		nb_commit = (uint16_t)(nb_commit - n);
 
 		tx_id = 0;
-		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
 		txdp = &txq->tx_ring[tx_id];
@@ -1780,12 +1781,12 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
-	if (tx_id > txq->next_rs) {
-		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
-		txq->next_rs =
-			(uint16_t)(txq->next_rs + txq->rs_thresh);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
 	}
 
 	txq->tx_tail = tx_id;
@@ -1806,7 +1807,7 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t ret, num;
 
 		/* cross rs_thresh boundary is not allowed */
-		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = iavf_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
 						     num, offload);
 		nb_tx += ret;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index a899309f94..dc1fef24f0 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -1854,18 +1854,18 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 	struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
 
 	/* check DD bits on threshold descriptor */
-	if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
 	     rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 	    rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
 		return 0;
 
-	n = txq->rs_thresh >> txq->use_ctx;
+	n = txq->tx_rs_thresh >> txq->use_ctx;
 
 	/* first buffer to free from S/W ring is at index
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
 	txep = (void *)txq->sw_ring;
-	txep += (txq->next_dd >> txq->use_ctx) - (n - 1);
+	txep += (txq->tx_next_dd >> txq->use_ctx) - (n - 1);
 
 	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
 		struct rte_mempool *mp = txep[0].mbuf->pool;
@@ -1951,12 +1951,12 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq)
 
 done:
 	/* buffers were freed, update counters */
-	txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
-	txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
-	if (txq->next_dd >= txq->nb_tx_desc)
-		txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
 
-	return txq->rs_thresh;
+	return txq->tx_rs_thresh;
 }
 
 static __rte_always_inline void
@@ -2319,19 +2319,20 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
-	if (txq->nb_free < txq->free_thresh)
+	if (txq->nb_tx_free < txq->tx_free_thresh)
 		iavf_tx_free_bufs_avx512(txq);
 
-	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
 		return 0;
+	nb_commit = nb_pkts;
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
 	txep = (void *)txq->sw_ring;
 	txep += tx_id;
 
-	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
@@ -2346,7 +2347,7 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		nb_commit = (uint16_t)(nb_commit - n);
 
 		tx_id = 0;
-		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
 		txdp = &txq->tx_ring[tx_id];
@@ -2359,12 +2360,12 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	iavf_vtx(txdp, tx_pkts, nb_commit, flags, offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
-	if (tx_id > txq->next_rs) {
-		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
-		txq->next_rs =
-			(uint16_t)(txq->next_rs + txq->rs_thresh);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
 	}
 
 	txq->tx_tail = tx_id;
@@ -2386,10 +2387,10 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
-	if (txq->nb_free < txq->free_thresh)
+	if (txq->nb_tx_free < txq->tx_free_thresh)
 		iavf_tx_free_bufs_avx512(txq);
 
-	nb_commit = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts << 1);
+	nb_commit = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts << 1);
 	nb_commit &= 0xFFFE;
 	if (unlikely(nb_commit == 0))
 		return 0;
@@ -2400,7 +2401,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 	txep = (void *)txq->sw_ring;
 	txep += (tx_id >> 1);
 
-	txq->nb_free = (uint16_t)(txq->nb_free - nb_commit);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_commit);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (n != 0 && nb_commit >= n) {
@@ -2414,7 +2415,7 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
-		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 		tx_id = 0;
 		/* avoid reach the end of ring */
 		txdp = txq->tx_ring;
@@ -2427,12 +2428,12 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 	ctx_vtx(txdp, tx_pkts, nb_mbuf, flags, offload, txq->vlan_flag);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
-	if (tx_id > txq->next_rs) {
-		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
-		txq->next_rs =
-			(uint16_t)(txq->next_rs + txq->rs_thresh);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
 	}
 
 	txq->tx_tail = tx_id;
@@ -2452,7 +2453,7 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t ret, num;
 
 		/* cross rs_thresh boundary is not allowed */
-		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = iavf_xmit_fixed_burst_vec_avx512(tx_queue, &tx_pkts[nb_tx],
 						       num, offload);
 		nb_tx += ret;
@@ -2480,10 +2481,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
 	const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx; /* end of SW ring */
 	struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
 
-	if (!txq->sw_ring || txq->nb_free == max_desc)
+	if (!txq->sw_ring || txq->nb_tx_free == max_desc)
 		return;
 
-	i = (txq->next_dd - txq->rs_thresh + 1) >> txq->use_ctx;
+	i = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> txq->use_ctx;
 	while (i != end_desc) {
 		rte_pktmbuf_free_seg(swr[i].mbuf);
 		swr[i].mbuf = NULL;
@@ -2517,7 +2518,7 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t ret, num;
 
 		/* cross rs_thresh boundary is not allowed */
-		num = (uint16_t)RTE_MIN(nb_pkts << 1, txq->rs_thresh);
+		num = (uint16_t)RTE_MIN(nb_pkts << 1, txq->tx_rs_thresh);
 		num = num >> 1;
 		ret = iavf_xmit_fixed_burst_vec_avx512_ctx(tx_queue, &tx_pkts[nb_tx],
 							   num, offload);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 2c118cc059..ff24055c34 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -26,17 +26,17 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
 	struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];
 
 	/* check DD bits on threshold descriptor */
-	if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
 	     rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 	    rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
 		return 0;
 
-	n = txq->rs_thresh;
+	n = txq->tx_rs_thresh;
 
 	/* first buffer to free from S/W ring is at index
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
-	txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
 	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
@@ -65,12 +65,12 @@ iavf_tx_free_bufs(struct iavf_tx_queue *txq)
 	}
 
 	/* buffers were freed, update counters */
-	txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
-	txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
-	if (txq->next_dd >= txq->nb_tx_desc)
-		txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
 
-	return txq->rs_thresh;
+	return txq->tx_rs_thresh;
 }
 
 static inline void
@@ -109,10 +109,10 @@ _iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
 	unsigned i;
 	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
 
-	if (!txq->sw_ring || txq->nb_free == max_desc)
+	if (!txq->sw_ring || txq->nb_tx_free == max_desc)
 		return;
 
-	i = txq->next_dd - txq->rs_thresh + 1;
+	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
 	while (i != txq->tx_tail) {
 		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
 		txq->sw_ring[i].mbuf = NULL;
@@ -169,8 +169,8 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
 	if (!txq)
 		return -1;
 
-	if (txq->rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
-	    txq->rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
+	if (txq->tx_rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
+	    txq->tx_rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
 		return -1;
 
 	if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index bc4b8f14c8..ed8455d669 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -1374,10 +1374,10 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 	int i;
 
-	if (txq->nb_free < txq->free_thresh)
+	if (txq->nb_tx_free < txq->tx_free_thresh)
 		iavf_tx_free_bufs(txq);
 
-	nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
 		return 0;
 	nb_commit = nb_pkts;
@@ -1386,7 +1386,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	txdp = &txq->tx_ring[tx_id];
 	txep = &txq->sw_ring[tx_id];
 
-	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
@@ -1400,7 +1400,7 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		nb_commit = (uint16_t)(nb_commit - n);
 
 		tx_id = 0;
-		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reach the end of ring */
 		txdp = &txq->tx_ring[tx_id];
@@ -1412,12 +1412,12 @@ iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	iavf_vtx(txdp, tx_pkts, nb_commit, flags);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
-	if (tx_id > txq->next_rs) {
-		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
 			rte_cpu_to_le_64(((uint64_t)IAVF_TX_DESC_CMD_RS) <<
 					 IAVF_TXD_QW1_CMD_SHIFT);
-		txq->next_rs =
-			(uint16_t)(txq->next_rs + txq->rs_thresh);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
 	}
 
 	txq->tx_tail = tx_id;
@@ -1441,7 +1441,7 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t ret, num;
 
 		/* cross rs_thresh boundary is not allowed */
-		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = iavf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
 		nb_tx += ret;
 		nb_pkts -= ret;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 065ab3594c..0646a2f978 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1247,7 +1247,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		/* Virtchnnl configure tx queues by pairs */
 		if (i < adapter->dev_data->nb_tx_queues) {
 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
-			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
 		}
 
 		vc_qp->rxq.vsi_id = vf->vsi_res->vsi_id;
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 502f386b56..95dbe2bedd 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -124,7 +124,7 @@ static inline uint32_t ixgbe_read_addr(volatile void* addr)
 	rte_write32_wc_relaxed((rte_cpu_to_le_32(value)), reg)
 
 #define IXGBE_PCI_REG_ADDR(hw, reg) \
-	((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+	((volatile void *)((char *)(hw)->hw_addr + (reg)))
 
 #define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
 	IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index db4b993ebc..0a80b944f0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -308,7 +308,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	/* update tail pointer */
 	rte_wmb();
-	IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
+	IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
@@ -946,7 +946,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
 		   (unsigned) tx_id, (unsigned) nb_tx);
-	IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
+	IXGBE_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
 	txq->tx_tail = tx_id;
 
 	return nb_tx;
@@ -2786,11 +2786,11 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	    hw->mac.type == ixgbe_mac_X550_vf ||
 	    hw->mac.type == ixgbe_mac_X550EM_x_vf ||
 	    hw->mac.type == ixgbe_mac_X550EM_a_vf)
-		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+		txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
 	else
-		txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
+		txq->qtx_tail = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
 
-	txq->tx_ring_phys_addr = tz->iova;
+	txq->tx_ring_dma = tz->iova;
 	txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
 
 	/* Allocate software ring */
@@ -2802,7 +2802,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
-		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_dma);
 
 	/* set up vector or scalar TX function as appropriate */
 	ixgbe_set_tx_function(dev, txq);
@@ -5303,7 +5303,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
 
-		bus_addr = txq->tx_ring_phys_addr;
+		bus_addr = txq->tx_ring_dma;
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
 				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(txq->reg_idx),
@@ -5887,7 +5887,7 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 	/* Setup the Base and Length of the Tx Descriptor Rings */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		bus_addr = txq->tx_ring_phys_addr;
+		bus_addr = txq->tx_ring_dma;
 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
 				(uint32_t)(bus_addr & 0x00000000ffffffffULL));
 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 1647396419..00e2009b3e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -186,12 +186,12 @@ struct ixgbe_advctx_info {
 struct ixgbe_tx_queue {
 	/** TX ring virtual address. */
 	volatile union ixgbe_adv_tx_desc *tx_ring;
-	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+	rte_iova_t          tx_ring_dma; /**< TX ring DMA address. */
 	union {
 		struct ci_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
 		struct ci_tx_entry_vec *sw_ring_v; /**< address of SW ring for vector PMD */
 	};
-	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+	volatile uint8_t    *qtx_tail; /**< Address of TDT register. */
 	uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
 	uint16_t            tx_tail;       /**< current value of TDT reg. */
 	/**< Start freeing TX buffers if there are less free descriptors than
@@ -218,7 +218,7 @@ struct ixgbe_tx_queue {
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
 	const struct ixgbe_txq_ops *ops;       /**< txq ops */
-	uint8_t             tx_deferred_start; /**< not in global dev start. */
+	bool                tx_deferred_start; /**< not in global dev start. */
 #ifdef RTE_LIB_SECURITY
 	uint8_t             using_ipsec;
 	/**< indicates that IPsec TX feature is in use */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 02b53c008e..871c1a7cd2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -628,7 +628,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+	IXGBE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index c8b5377c9f..37f2079519 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -751,7 +751,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	txq->tx_tail = tx_id;
 
-	IXGBE_PCI_REG_WC_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+	IXGBE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
 
 	return nb_pkts;
 }
-- 
2.43.0
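
A note on the intent, for reviewers: once every driver uses the common
names, per-driver copies of the descriptor-recycling bookkeeping can
collapse into one shared helper. The sketch below is hypothetical --
"ci_tx_queue" is an assumed future merged structure, not a type
introduced by this patch -- but the arithmetic mirrors the
iavf_tx_free_bufs() code visible in the diff above:

	/* Hypothetical shared cleanup helper over the aligned field names. */
	static inline uint16_t
	ci_tx_free_bufs(struct ci_tx_queue *txq,
			int (*desc_done)(struct ci_tx_queue *txq, uint16_t idx))
	{
		/* nothing to do until the descriptor at tx_next_dd completes */
		if (!desc_done(txq, txq->tx_next_dd))
			return 0;

		/* ... free the tx_rs_thresh mbufs ending at tx_next_dd ... */

		/* advance the ring bookkeeping using the common names */
		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
		txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
		if (txq->tx_next_dd >= txq->nb_tx_desc)
			txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

		return txq->tx_rs_thresh;
	}

The ixgbe_osdep.h change serves the same goal: IXGBE_PCI_REG_ADDR() now
yields "volatile void *", which converts implicitly to both the 32-bit
register pointers used elsewhere in ixgbe and the new
"volatile uint8_t *qtx_tail" field (matching the qtx_tail type iavf
already uses), so no casts need to be added at the assignment sites.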