DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Joyce Kong <joyce.kong@arm.com>,
	chenbo.xia@intel.com, jerinj@marvell.com, ruifeng.wang@arm.com,
	honnappa.nagarahalli@arm.com
Cc: dev@dpdk.org, nd@arm.com
Subject: Re: [dpdk-dev] [PATCH v1 3/4] net/virtio: add vectorized packed ring Tx NEON path
Date: Tue, 5 Jan 2021 15:33:59 +0100	[thread overview]
Message-ID: <a2dc3e21-9c93-3f1f-ce04-ad72bd02bdad@redhat.com> (raw)
In-Reply-To: <20201117100635.27690-4-joyce.kong@arm.com>



On 11/17/20 11:06 AM, Joyce Kong wrote:
> Optimize packed ring Tx batch path with NEON instructions.
> 
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
>  drivers/net/virtio/virtio_rxtx_packed.h      |   6 +-
>  drivers/net/virtio/virtio_rxtx_packed_neon.h | 143 +++++++++++++++++++
>  2 files changed, 148 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
> index 8f5198ad7..016b6fb24 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed.h
> +++ b/drivers/net/virtio/virtio_rxtx_packed.h
> @@ -28,6 +28,8 @@
>  /* flag bits offset in packed ring desc from ID */
>  #define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
>  	offsetof(struct vring_packed_desc, id)) * BYTE_SIZE)
> +#define FLAGS_LEN_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
> +	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
>  #endif
>  
>  #define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
> @@ -36,13 +38,15 @@
>  /* reference count offset in mbuf rearm data */
>  #define REFCNT_BITS_OFFSET ((offsetof(struct rte_mbuf, refcnt) - \
>  	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
> +
> +#ifdef CC_AVX512_SUPPORT
>  /* segment number offset in mbuf rearm data */
>  #define SEG_NUM_BITS_OFFSET ((offsetof(struct rte_mbuf, nb_segs) - \
>  	offsetof(struct rte_mbuf, rearm_data)) * BYTE_SIZE)
> -
>  /* default rearm data */
>  #define DEFAULT_REARM_DATA (1ULL << SEG_NUM_BITS_OFFSET | \
>  	1ULL << REFCNT_BITS_OFFSET)
> +#endif
>  
>  /* id bits offset in packed ring desc higher 64bits */
>  #define ID_BITS_OFFSET ((offsetof(struct vring_packed_desc, id) - \
> diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
> index fb1e49909..041f771ea 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed_neon.h
> +++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
> @@ -16,6 +16,149 @@
>  #include "virtio_rxtx_packed.h"
>  #include "virtqueue.h"
>  
> +static inline int
> +virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
> +				   struct rte_mbuf **tx_pkts)
> +{
> +	struct virtqueue *vq = txvq->vq;
> +	uint16_t head_size = vq->hw->vtnet_hdr_size;
> +	uint16_t idx = vq->vq_avail_idx;
> +	struct virtio_net_hdr *hdr;
> +	struct vq_desc_extra *dxp;
> +	struct vring_packed_desc *p_desc;
> +	uint16_t i;
> +
> +	if (idx & PACKED_BATCH_MASK)
> +		return -1;
> +
> +	if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
> +		return -1;
> +
> +	/* Map four refcnt and nb_segs from mbufs to one NEON register. */
> +	uint8x16_t ref_seg_msk = {
> +		2, 3, 4, 5,
> +		10, 11, 12, 13,
> +		18, 19, 20, 21,
> +		26, 27, 28, 29
> +	};
> +
> +	/* Map four data_off from mbufs to one NEON register. */
> +	uint8x8_t data_msk = {
> +		0, 1,
> +		8, 9,
> +		16, 17,
> +		24, 25
> +	};
> +
> +	uint16x8_t net_hdr_msk = {
> +		0xFFFF, 0xFFFF,
> +		0, 0, 0, 0
> +	};
> +
> +	uint16x4_t pkts[PACKED_BATCH_SIZE];
> +	uint8x16x2_t mbuf;
> +	/* Load four mbufs rearm data. */
> +	RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
> +	pkts[0] = vld1_u16((uint16_t *)&tx_pkts[0]->rearm_data);
> +	pkts[1] = vld1_u16((uint16_t *)&tx_pkts[1]->rearm_data);
> +	pkts[2] = vld1_u16((uint16_t *)&tx_pkts[2]->rearm_data);
> +	pkts[3] = vld1_u16((uint16_t *)&tx_pkts[3]->rearm_data);
> +
> +	mbuf.val[0] = vreinterpretq_u8_u16(vcombine_u16(pkts[0], pkts[1]));
> +	mbuf.val[1] = vreinterpretq_u8_u16(vcombine_u16(pkts[2], pkts[3]));
> +
> +	/* refcnt = 1 and nb_segs = 1 */
> +	uint32x4_t def_ref_seg = vdupq_n_u32(0x10001);
> +	/* Check refcnt and nb_segs. */
> +	uint32x4_t ref_seg = vreinterpretq_u32_u8(vqtbl2q_u8(mbuf, ref_seg_msk));
> +	poly128_t cmp1 = vreinterpretq_p128_u32(~vceqq_u32(ref_seg, def_ref_seg));
> +	if (unlikely(cmp1))
> +		return -1;
> +
> +	/* Check headroom is enough. */
> +	uint16x4_t head_rooms = vdup_n_u16(head_size);
> +	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
> +			 offsetof(struct rte_mbuf, rearm_data));
> +	uint16x4_t data_offset = vreinterpret_u16_u8(vqtbl2_u8(mbuf, data_msk));
> +	uint64x1_t cmp2 = vreinterpret_u64_u16(vclt_u16(data_offset, head_rooms));
> +	if (unlikely(vget_lane_u64(cmp2, 0)))
> +		return -1;
> +
> +	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +		dxp = &vq->vq_descx[idx + i];
> +		dxp->ndescs = 1;
> +		dxp->cookie = tx_pkts[i];
> +	}
> +
> +	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +		tx_pkts[i]->data_off -= head_size;
> +		tx_pkts[i]->data_len += head_size;
> +	}
> +
> +	uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
> +	uint64x2_t base_addr0 = {
> +		VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
> +		VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
> +	};
> +	uint64x2_t base_addr1 = {
> +		VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
> +		VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
> +	};
> +
> +	desc[0].val[0] = base_addr0;
> +	desc[1].val[0] = base_addr1;
> +
> +	uint64_t flags = (uint64_t)vq->vq_packed.cached_flags << FLAGS_LEN_BITS_OFFSET;
> +	uint64x2_t tx_desc0 = {
> +		flags | (uint64_t)idx << ID_BITS_OFFSET | tx_pkts[0]->data_len,
> +		flags | (uint64_t)(idx + 1) << ID_BITS_OFFSET | tx_pkts[1]->data_len
> +	};
> +
> +	uint64x2_t tx_desc1 = {
> +		flags | (uint64_t)(idx + 2) << ID_BITS_OFFSET | tx_pkts[2]->data_len,
> +		flags | (uint64_t)(idx + 3) << ID_BITS_OFFSET | tx_pkts[3]->data_len
> +	};
> +
> +	desc[0].val[1] = tx_desc0;
> +	desc[1].val[1] = tx_desc1;
> +
> +	if (!vq->hw->has_tx_offload) {
> +		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
> +					struct virtio_net_hdr *, -head_size);
> +			/* Clear net hdr. */
> +			uint16x8_t v_hdr = vld1q_u16((void *)hdr);
> +			vst1q_u16((void *)hdr, vandq_u16(v_hdr, net_hdr_msk));
> +		}
> +	} else {
> +		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
> +					struct virtio_net_hdr *, -head_size);
> +			virtqueue_xmit_offload(hdr, tx_pkts[i], true);
> +		}
> +	}
> +
> +	/* Enqueue packet buffers. */
> +	p_desc = &vq->vq_packed.ring.desc[idx];
> +	vst2q_u64((uint64_t *)p_desc, desc[0]);
> +	vst2q_u64((uint64_t *)(p_desc + 2), desc[1]);
> +
> +	virtio_update_batch_stats(&txvq->stats, tx_pkts[0]->pkt_len,
> +			tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len,
> +			tx_pkts[3]->pkt_len);
> +
> +	vq->vq_avail_idx += PACKED_BATCH_SIZE;
> +	vq->vq_free_cnt -= PACKED_BATCH_SIZE;
> +
> +	if (vq->vq_avail_idx >= vq->vq_nentries) {
> +		vq->vq_avail_idx -= vq->vq_nentries;
> +		vq->vq_packed.cached_flags ^=
> +			VRING_PACKED_DESC_F_AVAIL_USED;
> +	}
> +
> +	return 0;
> +}
> +
>  static inline uint16_t
>  virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
>  				   struct rte_mbuf **rx_pkts)
> 
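
For readers less used to the table-lookup intrinsics: the two vqtbl2 shuffles and the vceq/vclt compares above are a vectorized form of the per-mbuf checks sketched below. This is an illustrative scalar equivalent only, not code from the patch, and the helper name is made up.

static inline int
tx_batch_preconditions_ok(struct rte_mbuf **tx_pkts, uint16_t head_size)
{
	uint16_t i;

	/* Every mbuf in the batch must be unshared (refcnt == 1), be a
	 * single segment (nb_segs == 1) and have enough headroom for the
	 * virtio net header prepended by the Tx path.
	 */
	for (i = 0; i < PACKED_BATCH_SIZE; i++) {
		if (tx_pkts[i]->refcnt != 1 || tx_pkts[i]->nb_segs != 1)
			return 0;
		if (tx_pkts[i]->data_off < head_size)
			return 0;
	}
	return 1;
}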

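The two vst2q_u64 stores interleave the address vector and the len/id/flags vector, so each 16-byte vring_packed_desc receives the buffer address in its low quadword and the combined len | id | flags word in its high quadword. Below is a scalar sketch of the equivalent stores; it is illustrative only, assumes data_off/data_len were already adjusted for the header as done earlier in the function, and uses a made-up helper name.

static inline void
tx_batch_fill_descs_scalar(struct virtqueue *vq, struct rte_mbuf **tx_pkts,
			   struct vring_packed_desc *p_desc, uint16_t idx)
{
	uint16_t i;

	for (i = 0; i < PACKED_BATCH_SIZE; i++) {
		/* Low quadword: address of the packet data (header included). */
		uint64_t addr = VIRTIO_MBUF_ADDR(tx_pkts[i], vq) +
				tx_pkts[i]->data_off;
		/* High quadword: len in bits 0-31, id in bits 32-47, flags in
		 * bits 48-63, matching ID_BITS_OFFSET and FLAGS_LEN_BITS_OFFSET.
		 */
		uint64_t meta =
			(uint64_t)vq->vq_packed.cached_flags << FLAGS_LEN_BITS_OFFSET |
			(uint64_t)(idx + i) << ID_BITS_OFFSET |
			tx_pkts[i]->data_len;

		((uint64_t *)&p_desc[i])[0] = addr;
		((uint64_t *)&p_desc[i])[1] = meta;
	}
}
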
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


Thread overview: 26+ messages
2020-09-11 12:09 [dpdk-dev] [RFC 0/3] Vectorize packed ring RX path with NEON Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 1/3] net/virtio: move AVX based Rx and Tx code to separate file Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 2/3] net/virtio: add vectorized packed ring Rx NEON path Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 3/3] net/virtio: add election for packed vector " Joyce Kong
2020-10-05  7:34 ` [dpdk-dev] [RFC 0/3] Vectorize packed ring RX path with NEON Maxime Coquelin
2020-10-08  6:54   ` Joyce Kong
2020-10-15  9:01   ` Ruifeng Wang
2020-10-15  9:02     ` Maxime Coquelin
2020-11-17 10:06 ` [dpdk-dev] [PATCH v1 0/4] Vectorize packed ring RX/TX " Joyce Kong
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 1/4] net/virtio: move AVX based Rx and Tx code to separate file Joyce Kong
2021-01-05 14:06     ` Maxime Coquelin
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 2/4] net/virtio: add vectorized packed ring Rx NEON path Joyce Kong
2021-01-05 14:16     ` Maxime Coquelin
2021-01-05 14:27       ` Maxime Coquelin
2021-01-07 10:39         ` Maxime Coquelin
2021-01-08  7:29           ` Joyce Kong
2021-01-08 17:02     ` Ferruh Yigit
2021-01-08 22:26       ` Honnappa Nagarahalli
2021-01-11 13:05         ` Aaron Conole
2021-01-11 10:45       ` Maxime Coquelin
2021-01-11 13:04       ` Aaron Conole
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 3/4] net/virtio: add vectorized packed ring Tx " Joyce Kong
2021-01-05 14:33     ` Maxime Coquelin [this message]
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 4/4] net/virtio: add election for packed vector " Joyce Kong
2021-01-05 14:42     ` Maxime Coquelin
2021-01-08  9:11   ` [dpdk-dev] [PATCH v1 0/4] Vectorize packed ring RX/TX path with NEON Maxime Coquelin
