From: Bruce Richardson <bruce.richardson@intel.com>
To: Shaiq Wani <shaiq.wani@intel.com>
Cc: <dev@dpdk.org>, <aman.deep.singh@intel.com>
Subject: Re: [PATCH v5 4/4] net/idpf: use common Tx free fn in idpf
Date: Fri, 28 Mar 2025 17:25:44 +0000
Message-ID: <Z-bbmOZB3t6GfeSR@bricha3-mobl1.ger.corp.intel.com>
In-Reply-To: <20250327160437.2296127-5-shaiq.wani@intel.com>
On Thu, Mar 27, 2025 at 09:34:37PM +0530, Shaiq Wani wrote:
> Switch the idpf driver to use the common Tx free function for the
> AVX2 and AVX512 code paths.
>
> Signed-off-by: Shaiq Wani <shaiq.wani@intel.com>
> ---
> .../net/intel/idpf/idpf_common_rxtx_avx2.c | 68 +----
> .../net/intel/idpf/idpf_common_rxtx_avx512.c | 237 +-----------------
> 2 files changed, 22 insertions(+), 283 deletions(-)
>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> index bce0257804..6399f357d3 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> @@ -79,6 +79,14 @@ idpf_singleq_rx_rearm(struct idpf_rx_queue *rxq)
> IDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
> }
>
> +static inline int
> +idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
> +{
> + return (txq->idpf_tx_ring[idx].qw1 &
Minor nit - watch the indentation here: this line is indented by two tabs
instead of one. [The next line is correctly indented at three levels,
though, so don't adjust that one.]
> + rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
> + rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
> +}
> +
> static inline uint16_t
> _idpf_singleq_recv_raw_pkts_vec_avx2(struct idpf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts)
> @@ -479,64 +487,6 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
> return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
> }
>
> -static __rte_always_inline int
> -idpf_singleq_tx_free_bufs_vec(struct ci_tx_queue *txq)
> -{
> - struct ci_tx_entry_vec *txep;
> - uint32_t n;
> - uint32_t i;
> - int nb_free = 0;
> - struct rte_mbuf *m;
> - struct rte_mbuf **free = alloca(sizeof(struct rte_mbuf *) * txq->tx_rs_thresh);
> -
> - /* check DD bits on threshold descriptor */
> - if ((txq->idpf_tx_ring[txq->tx_next_dd].qw1 &
> - rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
> - rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE))
> - return 0;
> -
> - n = txq->tx_rs_thresh;
> -
> - /* first buffer to free from S/W ring is at index
> - * tx_next_dd - (tx_rs_thresh-1)
> - */
> - txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
> - m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
> - if (likely(m)) {
> - free[0] = m;
> - nb_free = 1;
> - for (i = 1; i < n; i++) {
> - m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> - if (likely(m)) {
> - if (likely(m->pool == free[0]->pool)) {
> - free[nb_free++] = m;
> - } else {
> - rte_mempool_put_bulk(free[0]->pool,
> - (void *)free,
> - nb_free);
> - free[0] = m;
> - nb_free = 1;
> - }
> - }
> - }
> - rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
> - } else {
> - for (i = 1; i < n; i++) {
> - m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
> - if (m)
> - rte_mempool_put(m->pool, m);
> - }
> - }
> -
> - /* buffers were freed, update counters */
> - txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
> - txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
> - if (txq->tx_next_dd >= txq->nb_tx_desc)
> - txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
> -
> - return txq->tx_rs_thresh;
> -}
> -
> static inline void
> idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
> struct rte_mbuf *pkt, uint64_t flags)
> @@ -621,7 +571,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
> nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
>
> if (txq->nb_tx_free < txq->tx_free_thresh)
> - idpf_singleq_tx_free_bufs_vec(txq);
> + ci_tx_free_bufs_vec(txq, idpf_tx_desc_done, false);
>
> nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
> if (unlikely(nb_pkts == 0))
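For anyone reviewing this without the common code to hand:
ci_tx_free_bufs_vec() takes the driver's descriptor-done check as a
callback, so the hunk above is behaviour-preserving. Below is an
illustrative sketch of the contract only - the authoritative definition
lives in drivers/net/intel/common/tx.h, and the real one keeps the bulk
mempool-put optimisation that this simplified version drops:

	/* Sketch of the common free path's contract (illustrative only).
	 * The body is essentially the idpf_singleq_tx_free_bufs_vec()
	 * deleted above, parameterised by the driver's done check.
	 */
	typedef int (*ci_desc_done_fn)(struct ci_tx_queue *txq, uint16_t idx);

	static __rte_always_inline int
	ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done,
			bool ctx_descs)
	{
		struct ci_tx_entry_vec *txep;
		uint32_t n = txq->tx_rs_thresh;
		uint32_t i;

		/* nothing to do until the threshold descriptor completes;
		 * ctx_descs (false for the idpf single queue path) is
		 * ignored in this simplified sketch
		 */
		if (!desc_done(txq, txq->tx_next_dd))
			return 0;

		/* first buffer to free from the S/W ring is at index
		 * tx_next_dd - (tx_rs_thresh - 1)
		 */
		txep = &txq->sw_ring_vec[txq->tx_next_dd - (n - 1)];
		for (i = 0; i < n; i++) {
			struct rte_mbuf *m = rte_pktmbuf_prefree_seg(txep[i].mbuf);

			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}

		/* buffers were freed, update counters */
		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
		txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
		if (txq->tx_next_dd >= txq->nb_tx_desc)
			txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

		return txq->tx_rs_thresh;
	}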
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
> index c0ec754642..dbbdc71e22 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
> @@ -122,6 +122,14 @@ idpf_singleq_rearm_common(struct idpf_rx_queue *rxq)
> IDPF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
> }
>
> +static inline int
> +idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
> +{
> + return (txq->idpf_tx_ring[idx].qw1 &
> + rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
> + rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
> +}
> +
This code appears above, too, in the AVX2 file. Can it not be put in a
common header to avoid the duplication?
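For instance, something like the following in idpf_common_rxtx.h
(untested sketch - pick whichever shared header actually has the needed
definitions in scope) would give both files a single copy:

	/* single shared definition of the descriptor-done check, to avoid
	 * duplicating it in the AVX2 and AVX512 files
	 */
	static inline int
	idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
	{
		return (txq->idpf_tx_ring[idx].qw1 &
			rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) ==
			rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
	}

That could equally come as a follow-up patch, though; the ack above
stands either way.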
> static __rte_always_inline void
> idpf_singleq_rearm(struct idpf_rx_queue *rxq)
> {
<snip>