From: Bruce Richardson <bruce.richardson@intel.com>
To: Shaiq Wani <shaiq.wani@intel.com>
Cc: <dev@dpdk.org>, <aman.deep.singh@intel.com>
Subject: Re: [PATCH v2 1/2] net/idpf: enable AVX2 for split queue Rx
Date: Thu, 25 Sep 2025 17:38:14 +0100
Message-ID: <aNVv9vIu9JMKNidj@bricha3-mobl1.ger.corp.intel.com>
In-Reply-To: <20250925092020.1640175-2-shaiq.wani@intel.com>
On Thu, Sep 25, 2025 at 02:50:19PM +0530, Shaiq Wani wrote:
> Some CPUs don't support AVX512, so enable AVX2 for them to
> get better per-core performance.
>
> In the single queue model, the same descriptor queue is used by SW
> to post descriptors to the device and by the device to report completed
> descriptors back to SW, whereas the split queue model separates them into
> different queues for parallel processing and improved performance.
>
> Signed-off-by: Shaiq Wani <shaiq.wani@intel.com>
> ---
Hi,
a few small comments inline below.
Thanks,
/Bruce
> drivers/net/intel/idpf/idpf_common_device.h | 1 +
> drivers/net/intel/idpf/idpf_common_rxtx.c | 7 +
> drivers/net/intel/idpf/idpf_common_rxtx.h | 3 +
> .../net/intel/idpf/idpf_common_rxtx_avx2.c | 249 ++++++++++++++++++
> 4 files changed, 260 insertions(+)
>
> diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
> index 3b95d519c6..f9c60ba229 100644
> --- a/drivers/net/intel/idpf/idpf_common_device.h
> +++ b/drivers/net/intel/idpf/idpf_common_device.h
> @@ -49,6 +49,7 @@ enum idpf_rx_func_type {
> IDPF_RX_SINGLEQ,
> IDPF_RX_SINGLEQ_SCATTERED,
> IDPF_RX_SINGLEQ_AVX2,
> + IDPF_RX_SPLITQ_AVX2,
The scalar splitq receive is listed here just as IDPF_RX_DEFAULT, and the
AVX-512 splitq as IDPF_RX_AVX512, so following that scheme this should just
be IDPF_RX_AVX2. Alternatively, for consistency, you could rename those
others to IDPF_RX_SPLITQ and IDPF_RX_SPLITQ_AVX512. Either way, naming
consistency should be achievable, I think.
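For example, a rough sketch of the second option (assuming the existing
entries keep their current order, and with the renames still to be agreed):

	enum idpf_rx_func_type {
		IDPF_RX_SPLITQ,		/* renamed from IDPF_RX_DEFAULT (scalar splitq) */
		IDPF_RX_SINGLEQ,
		IDPF_RX_SINGLEQ_SCATTERED,
		IDPF_RX_SINGLEQ_AVX2,
		IDPF_RX_SPLITQ_AVX2,
		IDPF_RX_SPLITQ_AVX512,	/* renamed from IDPF_RX_AVX512 */
		IDPF_RX_SINGLQ_AVX512,
		IDPF_RX_MAX
	};

Any such rename would of course also need the idpf_rx_path_infos[]
initialisers and any other users of these values updated to match.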
> IDPF_RX_AVX512,
> IDPF_RX_SINGLQ_AVX512,
> IDPF_RX_MAX
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
> index a2b8c372d6..ecb12cfd0a 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
> @@ -1656,6 +1656,13 @@ const struct ci_rx_path_info idpf_rx_path_infos[] = {
> .rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
> .simd_width = RTE_VECT_SIMD_256,
> .extra.single_queue = true}},
> + [IDPF_RX_SPLITQ_AVX2] = {
> + .pkt_burst = idpf_dp_splitq_recv_pkts_avx2,
> + .info = "Split AVX2 Vector",
> + .features = {
> + .rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
> + .simd_width = RTE_VECT_SIMD_256,
> + }},
> #ifdef CC_AVX512_SUPPORT
> [IDPF_RX_AVX512] = {
> .pkt_burst = idpf_dp_splitq_recv_pkts_avx512,
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.h b/drivers/net/intel/idpf/idpf_common_rxtx.h
> index 3bc3323af4..3a9af06c86 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx.h
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx.h
> @@ -252,6 +252,9 @@ __rte_internal
> uint16_t idpf_dp_splitq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
> __rte_internal
> +uint16_t idpf_dp_splitq_recv_pkts_avx2(void *rxq, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
> +__rte_internal
> uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
> __rte_internal
> diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> index 21c8f79254..b24653f195 100644
> --- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> +++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
> @@ -482,6 +482,255 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
> return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
> }
>
> +static __rte_always_inline void
> +idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
> +{
> + int i;
> + uint16_t rx_id;
> + volatile union virtchnl2_rx_buf_desc *rxdp = rx_bufq->rx_ring;
> + struct rte_mbuf **rxep = &rx_bufq->sw_ring[rx_bufq->rxrearm_start];
> +
> + rxdp += rx_bufq->rxrearm_start;
> +
> + /* Try to bulk allocate mbufs from mempool */
> + if (rte_mempool_get_bulk(rx_bufq->mp,
> + (void **)rxep,
> + IDPF_RXQ_REARM_THRESH) < 0) {
Use rte_mbuf_raw_alloc_bulk() instead of rte_mempool_get_bulk(); it has
some extra sanity checks for debug builds and ensures that we don't bypass
too many library layers.
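Roughly like this (sketch only, assuming rte_mbuf_raw_alloc_bulk() keeps the
same return convention as rte_mempool_get_bulk(), i.e. 0 on success and
negative on failure):

	/* Try to bulk allocate mbufs via the mbuf API rather than the mempool API */
	if (rte_mbuf_raw_alloc_bulk(rx_bufq->mp, rxep, IDPF_RXQ_REARM_THRESH) < 0) {
		/* existing failure handling unchanged */
	}

I think the (void **) cast on rxep can then be dropped too, since the mbuf
API takes a struct rte_mbuf ** directly.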
> + if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >= rx_bufq->nb_rx_desc) {
> + __m128i zero_dma = _mm_setzero_si128();
> +
> + for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
> + rxep[i] = &rx_bufq->fake_mbuf;
> + _mm_storeu_si128((__m128i *)(uintptr_t)&rxdp[i], zero_dma);
> + }
> + }
> + rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
> + IDPF_RXQ_REARM_THRESH,
> + rte_memory_order_relaxed);
> + return;
> + }
> +
> + __m128i headroom = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, RTE_PKTMBUF_HEADROOM);
> +
> + for (i = 0; i < IDPF_RXQ_REARM_THRESH; i += 2, rxep += 2, rxdp += 2) {
> + struct rte_mbuf *mb0 = rxep[0];
> + struct rte_mbuf *mb1 = rxep[1];
> +
> + __m128i buf_addr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
> + __m128i buf_addr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
> +
> + __m128i dma_addr0 = _mm_unpackhi_epi64(buf_addr0, buf_addr0);
> + __m128i dma_addr1 = _mm_unpackhi_epi64(buf_addr1, buf_addr1);
> +
> + dma_addr0 = _mm_add_epi64(dma_addr0, headroom);
> + dma_addr1 = _mm_add_epi64(dma_addr1, headroom);
> +
> + rxdp[0].split_rd.pkt_addr = _mm_cvtsi128_si64(dma_addr0);
> + rxdp[1].split_rd.pkt_addr = _mm_cvtsi128_si64(dma_addr1);
> + }
> +
> + rx_bufq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
> + if (rx_bufq->rxrearm_start >= rx_bufq->nb_rx_desc)
> + rx_bufq->rxrearm_start = 0;
> +
> + rx_bufq->rxrearm_nb -= IDPF_RXQ_REARM_THRESH;
> +
> + rx_id = (uint16_t)((rx_bufq->rxrearm_start == 0) ?
> + (rx_bufq->nb_rx_desc - 1) : (rx_bufq->rxrearm_start - 1));
> +
> + IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, rx_id);
> +}
> +
> +static __rte_always_inline void
> +idpf_splitq_rearm_avx2(struct idpf_rx_queue *rx_bufq)
> +{
> + int i;
> + uint16_t rx_id;
> + volatile union virtchnl2_rx_buf_desc *rxdp = rx_bufq->rx_ring;
> + struct rte_mempool_cache *cache =
> + rte_mempool_default_cache(rx_bufq->mp, rte_lcore_id());
> + struct rte_mbuf **rxp = &rx_bufq->sw_ring[rx_bufq->rxrearm_start];
> +
> + rxdp += rx_bufq->rxrearm_start;
> +
> + if (unlikely(!cache)) {
> + idpf_splitq_rearm_common(rx_bufq);
> + return;
> + }
> +
> + if (cache->len < IDPF_RXQ_REARM_THRESH) {
> + uint32_t req = IDPF_RXQ_REARM_THRESH + (cache->size - cache->len);
> + int ret = rte_mempool_ops_dequeue_bulk(rx_bufq->mp,
> + &cache->objs[cache->len], req);
> + if (ret == 0) {
> + cache->len += req;
> + } else {
> + if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
> + rx_bufq->nb_rx_desc) {
> + __m128i dma_addr0 = _mm_setzero_si128();
> + for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
> + rxp[i] = &rx_bufq->fake_mbuf;
> + _mm_storeu_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]),
> + dma_addr0);
> + }
> + }
> + rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
> + IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
> + return;
> + }
> + }
> + __m128i headroom = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, RTE_PKTMBUF_HEADROOM);
> + const int step = 2;
> +
> + for (i = 0; i < IDPF_RXQ_REARM_THRESH; i += step, rxp += step, rxdp += step) {
> + struct rte_mbuf *mb0 = (struct rte_mbuf *)cache->objs[--cache->len];
> + struct rte_mbuf *mb1 = (struct rte_mbuf *)cache->objs[--cache->len];
> + rxp[0] = mb0;
> + rxp[1] = mb1;
> +
> + __m128i buf_addr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
> + __m128i buf_addr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
> +
> + __m128i dma_addr0 = _mm_unpackhi_epi64(buf_addr0, buf_addr0);
> + __m128i dma_addr1 = _mm_unpackhi_epi64(buf_addr1, buf_addr1);
> +
> + dma_addr0 = _mm_add_epi64(dma_addr0, headroom);
> + dma_addr1 = _mm_add_epi64(dma_addr1, headroom);
> +
> + rxdp[0].split_rd.pkt_addr = _mm_cvtsi128_si64(dma_addr0);
> + rxdp[1].split_rd.pkt_addr = _mm_cvtsi128_si64(dma_addr1);
> + }
> +
> + rx_bufq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
> + if (rx_bufq->rxrearm_start >= rx_bufq->nb_rx_desc)
> + rx_bufq->rxrearm_start = 0;
> +
> + rx_bufq->rxrearm_nb -= IDPF_RXQ_REARM_THRESH;
> +
> + rx_id = (uint16_t)((rx_bufq->rxrearm_start == 0) ?
> + (rx_bufq->nb_rx_desc - 1) : (rx_bufq->rxrearm_start - 1));
> +
> + IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, rx_id);
> +}
> +static __rte_always_inline uint16_t
> +_idpf_splitq_recv_raw_pkts_vec_avx2(struct idpf_rx_queue *rxq,
> + struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> +{
> + const uint32_t *ptype_tbl = rxq->adapter->ptype_tbl;
> + struct rte_mbuf **sw_ring = &rxq->bufq2->sw_ring[rxq->rx_tail];
> + volatile union virtchnl2_rx_desc *rxdp =
> + (volatile union virtchnl2_rx_desc *)rxq->rx_ring + rxq->rx_tail;
> +
> + rte_prefetch0(rxdp);
> + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, 4); /* 4 desc per AVX2 iteration */
> +
> + if (rxq->bufq2->rxrearm_nb > IDPF_RXQ_REARM_THRESH)
> + idpf_splitq_rearm_avx2(rxq->bufq2);
> +
> + uint64_t head_gen = rxdp->flex_adv_nic_3_wb.pktlen_gen_bufq_id;
> + if (((head_gen >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
> + VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) != rxq->expected_gen_id)
> + return 0;
> +
> + const __m128i gen_mask =
> + _mm_set1_epi64x(((uint64_t)rxq->expected_gen_id) << 46);
> +
> + uint16_t received = 0;
> + for (uint16_t i = 0; i < nb_pkts; i += 4, rxdp += 4) {
> + /* Step 1: pull mbufs */
> + __m128i ptrs = _mm_loadu_si128((__m128i *)&sw_ring[i]);
> + _mm_storeu_si128((__m128i *)&rx_pkts[i], ptrs);
> +
> + /* Step 2: load descriptors */
> + __m128i d0 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[0]));
> + rte_compiler_barrier();
> + __m128i d1 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[1]));
> + rte_compiler_barrier();
> + __m128i d2 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[2]));
> + rte_compiler_barrier();
> + __m128i d3 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[3]));
> +
> + /* Step 3: shuffle out pkt_len, data_len, vlan, rss */
> + const __m256i shuf = _mm256_set_epi8(
> + /* descriptor 3 */
> + 0xFF, 0xFF, 0xFF, 0xFF, 11, 10, 5, 4,
> + 0xFF, 0xFF, 5, 4, 0xFF, 0xFF, 0xFF, 0xFF,
> + /* descriptor 2 */
> + 0xFF, 0xFF, 0xFF, 0xFF, 11, 10, 5, 4,
> + 0xFF, 0xFF, 5, 4, 0xFF, 0xFF, 0xFF, 0xFF
> + );
> + __m128i d01_lo = d0, d01_hi = d1;
> + __m128i d23_lo = d2, d23_hi = d3;
> +
> + __m256i m23 = _mm256_shuffle_epi8(_mm256_set_m128i(d23_hi, d23_lo), shuf);
> + __m256i m01 = _mm256_shuffle_epi8(_mm256_set_m128i(d01_hi, d01_lo), shuf);
> +
> + /* Step 4: extract ptypes */
> + const __m256i ptype_mask = _mm256_set1_epi16(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M);
> + __m256i pt23 = _mm256_and_si256(_mm256_set_m128i(d23_hi, d23_lo), ptype_mask);
> + __m256i pt01 = _mm256_and_si256(_mm256_set_m128i(d01_hi, d01_lo), ptype_mask);
> +
> + uint16_t ptype2 = _mm256_extract_epi16(pt23, 1);
> + uint16_t ptype3 = _mm256_extract_epi16(pt23, 9);
> + uint16_t ptype0 = _mm256_extract_epi16(pt01, 1);
> + uint16_t ptype1 = _mm256_extract_epi16(pt01, 9);
> +
> + m23 = _mm256_insert_epi32(m23, ptype_tbl[ptype3], 2);
> + m23 = _mm256_insert_epi32(m23, ptype_tbl[ptype2], 0);
> + m01 = _mm256_insert_epi32(m01, ptype_tbl[ptype1], 2);
> + m01 = _mm256_insert_epi32(m01, ptype_tbl[ptype0], 0);
> +
> + /* Step 5: extract gen bits */
> + __m128i sts0 = _mm_srli_epi64(d0, 46);
> + __m128i sts1 = _mm_srli_epi64(d1, 46);
> + __m128i sts2 = _mm_srli_epi64(d2, 46);
> + __m128i sts3 = _mm_srli_epi64(d3, 46);
> +
> + __m128i merged_lo = _mm_unpacklo_epi64(sts0, sts2);
> + __m128i merged_hi = _mm_unpacklo_epi64(sts1, sts3);
> + __m128i valid = _mm_and_si128(_mm_and_si128(merged_lo, merged_hi),
> + _mm_unpacklo_epi64(gen_mask, gen_mask));
> + __m128i cmp = _mm_cmpeq_epi64(valid, _mm_unpacklo_epi64(gen_mask, gen_mask));
> + int burst = _mm_movemask_pd(_mm_castsi128_pd(cmp));
> +
> + /* Step 6: write rearm_data safely */
> + __m128i m01_lo = _mm256_castsi256_si128(m01);
> + __m128i m23_lo = _mm256_castsi256_si128(m23);
> +
> + uint64_t tmp01[2], tmp23[2];
> + _mm_storeu_si128((__m128i *)tmp01, m01_lo);
> + _mm_storeu_si128((__m128i *)tmp23, m23_lo);
> + *(uint64_t *)&rx_pkts[i]->rearm_data = tmp01[0];
> + *(uint64_t *)&rx_pkts[i + 1]->rearm_data = tmp01[1];
> + *(uint64_t *)&rx_pkts[i + 2]->rearm_data = tmp23[0];
> + *(uint64_t *)&rx_pkts[i + 3]->rearm_data = tmp23[1];
> +
> + received += burst;
> + if (burst != 4)
> + break;
> + }
> +
> + rxq->rx_tail += received;
> + if (received & 1) {
> + rxq->rx_tail &= ~(uint16_t)1;
> + received--;
> + }
> + rxq->rx_tail &= (rxq->nb_rx_desc - 1);
> + rxq->expected_gen_id ^= ((rxq->rx_tail & rxq->nb_rx_desc) != 0);
> + rxq->bufq2->rxrearm_nb += received;
> +
> + return received;
> +}
> +
> +RTE_EXPORT_INTERNAL_SYMBOL(idpf_dp_splitq_recv_pkts_avx2)
> +uint16_t
> +idpf_dp_splitq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + return _idpf_splitq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
> +}
> +
Why the extra level of functions here? In other drivers this is done to
separate out code shared between the single-buffer and scattered-packet
versions. Are there plans to handle multi-mbuf packets here? If not, we might
as well collapse the two functions down to one.
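That would look something like the following (untested sketch, just moving
the existing body up a level):

	RTE_EXPORT_INTERNAL_SYMBOL(idpf_dp_splitq_recv_pkts_avx2)
	uint16_t
	idpf_dp_splitq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts)
	{
		struct idpf_rx_queue *rxq = rx_queue;

		/* ... body of _idpf_splitq_recv_raw_pkts_vec_avx2() unchanged ... */
	}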
> +
> static inline void
> idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
> struct rte_mbuf *pkt, uint64_t flags)
> --
> 2.34.1
>