DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Joyce Kong <joyce.kong@arm.com>,
	chenbo.xia@intel.com, jerinj@marvell.com, ruifeng.wang@arm.com,
	honnappa.nagarahalli@arm.com
Cc: dev@dpdk.org, nd@arm.com
Subject: Re: [dpdk-dev] [PATCH v1 2/4] net/virtio: add vectorized packed ring Rx NEON path
Date: Tue, 5 Jan 2021 15:27:21 +0100	[thread overview]
Message-ID: <1473dd81-26aa-04ae-dd57-14ddf7880fd0@redhat.com> (raw)
In-Reply-To: <e5803611-23e6-a1f0-b470-20c210ab03a9@redhat.com>



On 1/5/21 3:16 PM, Maxime Coquelin wrote:
> 
> 
> On 11/17/20 11:06 AM, Joyce Kong wrote:
>> Optimize packed ring Rx batch path with NEON instructions.
>>
>> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
>> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
>> ---
>>  drivers/net/virtio/virtio_rxtx_packed.h      |  15 ++
>>  drivers/net/virtio/virtio_rxtx_packed_neon.h | 150 +++++++++++++++++++
>>  2 files changed, 165 insertions(+)
>>  create mode 100644 drivers/net/virtio/virtio_rxtx_packed_neon.h
>>
>> diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
>> index b0b1d63ec..8f5198ad7 100644
>> --- a/drivers/net/virtio/virtio_rxtx_packed.h
>> +++ b/drivers/net/virtio/virtio_rxtx_packed.h
>> @@ -19,9 +19,16 @@
>>  #include "virtqueue.h"
>>  
>>  #define BYTE_SIZE 8
>> +
>> +#ifdef CC_AVX512_SUPPORT
>>  /* flag bits offset in packed ring desc higher 64bits */
>>  #define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
>>  	offsetof(struct vring_packed_desc, len)) * BYTE_SIZE)
>> +#elif defined(RTE_ARCH_ARM)
>> +/* flag bits offset in packed ring desc from ID */
>> +#define FLAGS_BITS_OFFSET ((offsetof(struct vring_packed_desc, flags) - \
>> +	offsetof(struct vring_packed_desc, id)) * BYTE_SIZE)
>> +#endif
>>  
>>  #define PACKED_FLAGS_MASK ((0ULL | VRING_PACKED_DESC_F_AVAIL_USED) << \
>>  	FLAGS_BITS_OFFSET)
>> @@ -44,8 +51,16 @@
>>  /* net hdr short size mask */
>>  #define NET_HDR_MASK 0x3F
>>  
>> +#ifdef RTE_ARCH_ARM
>> +/* The cache line size differs across Arm platforms, so use a batch
>> + * size of four to match the minimum cache line size and accommodate
>> + * the NEON register size.
>> + */
>> +#define PACKED_BATCH_SIZE 4
>> +#else
>>  #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
>>  	sizeof(struct vring_packed_desc))
>> +#endif
>>  #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
>>  
>>  #ifdef VIRTIO_GCC_UNROLL_PRAGMA
>> diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
>> new file mode 100644
>> index 000000000..fb1e49909
>> --- /dev/null
>> +++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
>> @@ -0,0 +1,150 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2020 Arm Corporation
>> + */
>> +
>> +#include <stdlib.h>
>> +#include <stdint.h>
>> +#include <stdio.h>
>> +#include <string.h>
>> +#include <errno.h>
>> +
>> +#include <rte_net.h>
>> +#include <rte_vect.h>
>> +
>> +#include "virtio_ethdev.h"
>> +#include "virtio_pci.h"
>> +#include "virtio_rxtx_packed.h"
>> +#include "virtqueue.h"
>> +
>> +static inline uint16_t
>> +virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
>> +				   struct rte_mbuf **rx_pkts)
>> +{
>> +	struct virtqueue *vq = rxvq->vq;
>> +	struct virtio_hw *hw = vq->hw;
>> +	uint16_t head_size = hw->vtnet_hdr_size;
>> +	uint16_t id = vq->vq_used_cons_idx;
>> +	struct vring_packed_desc *p_desc;
>> +	uint16_t i;
>> +
>> +	if (id & PACKED_BATCH_MASK)
>> +		return -1;
>> +
>> +	if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
>> +		return -1;
> 
> This function returns an unsigned short; I think you should return 0
> here since it failed to dequeue packets.
> 
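For context, with a uint16_t return type the -1 does not stay negative.
A minimal stand-alone illustration of the conversion (dequeue_stub is just
a made-up name for this example, not code from the patch):

	static inline uint16_t dequeue_stub(void)
	{
		return -1;	/* implicitly converted to (uint16_t)-1 */
	}

	uint16_t ret = dequeue_stub();
	/* ret == 65535 (UINT16_MAX): non-zero, so an "if (ret)" check still
	 * treats it as a failure, but the value is surprising for an
	 * unsigned return type.
	 */
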
>> +	/* Map packed descriptor to mbuf fields. */
>> +	uint8x16_t shuf_msk1 = {
>> +		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
>> +		0, 1,			/* octet 1~0, low 16 bits pkt_len */
>> +		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
>> +		0, 1,			/* octet 1~0, 16 bits data_len */
>> +		0xFF, 0xFF,		/* vlan tci set as unknown */
>> +		0xFF, 0xFF, 0xFF, 0xFF
>> +	};
>> +
>> +	uint8x16_t shuf_msk2 = {
>> +		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
>> +		8, 9,			/* octet 9~8, low 16 bits pkt_len */
>> +		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
>> +		8, 9,			/* octet 9~8, 16 bits data_len */
>> +		0xFF, 0xFF,		/* vlan tci set as unknown */
>> +		0xFF, 0xFF, 0xFF, 0xFF
>> +	};
>> +
>> +	/* Subtract the header length. */
>> +	uint16x8_t len_adjust = {
>> +		0, 0,		/* ignore pkt_type field */
>> +		head_size,	/* sub head_size on pkt_len */
>> +		0,		/* ignore high 16 bits of pkt_len */
>> +		head_size,	/* sub head_size on data_len */
>> +		0, 0, 0		/* ignore non-length fields */
>> +	};
>> +
>> +	uint64x2_t desc[PACKED_BATCH_SIZE / 2];
>> +	uint64x2x2_t mbp[PACKED_BATCH_SIZE / 2];
>> +	uint64x2_t pkt_mb[PACKED_BATCH_SIZE];
>> +
>> +	p_desc = &vq->vq_packed.ring.desc[id];
>> +	/* Load high 64 bits of packed descriptor 0,1. */
>> +	desc[0] = vld2q_u64((uint64_t *)(p_desc)).val[1];
>> +	/* Load high 64 bits of packed descriptor 2,3. */
>> +	desc[1] = vld2q_u64((uint64_t *)(p_desc + 2)).val[1];
>> +
>> +	/* Only care avail/used bits. */
>> +	uint32x4_t v_mask = vdupq_n_u32(PACKED_FLAGS_MASK);
>> +	/* Extract high 32 bits of packed descriptor (id, flags). */
>> +	uint32x4_t v_desc = vuzp2q_u32(vreinterpretq_u32_u64(desc[0]),
>> +				vreinterpretq_u32_u64(desc[1]));
>> +	uint32x4_t v_flag = vandq_u32(v_desc, v_mask);
>> +
>> +	uint32x4_t v_used_flag = vdupq_n_u32(0);
>> +	if (vq->vq_packed.used_wrap_counter)
>> +		v_used_flag = vdupq_n_u32(PACKED_FLAGS_MASK);
>> +
>> +	poly128_t desc_stats = vreinterpretq_p128_u32(~vceqq_u32(v_flag, v_used_flag));
>> +
>> +	/* Check all descs are used. */
>> +	if (desc_stats)
>> +		return -1;
> 
> Same here. You should return 0 here as the queue is full.

I just looked again at the code and at the AVX implementation.
It should not return 0 here, but any positive value.

Maybe the cleanest way would be to change the function prototype to int:
0: success
-1: failure
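
Something along these lines, as a rough sketch only (untested, body elided,
types and macros as in the patch above):

	static inline int
	virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
					   struct rte_mbuf **rx_pkts)
	{
		struct virtqueue *vq = rxvq->vq;
		uint16_t id = vq->vq_used_cons_idx;

		if (id & PACKED_BATCH_MASK)
			return -1;	/* consumer index not batch aligned */

		if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
			return -1;	/* batch would run past the ring end */

		/* ... NEON batch dequeue exactly as in the patch ... */

		return 0;	/* whole batch dequeued */
	}

The caller side would then only need to check for a non-zero return to
fall back to dequeuing one descriptor at a time.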

>> +
>> +	/* Load 2 mbuf pointers per time. */
>> +	mbp[0] = vld2q_u64((uint64_t *)&vq->vq_descx[id]);
>> +	vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0].val[0]);
>> +
>> +	mbp[1] = vld2q_u64((uint64_t *)&vq->vq_descx[id + 2]);
>> +	vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1].val[0]);
>> +
>> +	/**
>> +	 *  Update data length and packet length for descriptor.
>> +	 *  structure of pkt_mb:
>> +	 *  --------------------------------------------------------------------
>> +	 *  |32 bits pkt_type|32 bits pkt_len|16 bits data_len|16 bits vlan_tci|
>> +	 *  --------------------------------------------------------------------
>> +	 */
>> +	pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
>> +			vreinterpretq_u8_u64(desc[0]), shuf_msk1));
>> +	pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
>> +			vreinterpretq_u8_u64(desc[0]), shuf_msk2));
>> +	pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
>> +			vreinterpretq_u8_u64(desc[1]), shuf_msk1));
>> +	pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
>> +			vreinterpretq_u8_u64(desc[1]), shuf_msk2));
>> +
>> +	pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
>> +			vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
>> +	pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
>> +			vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
>> +	pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
>> +			vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
>> +	pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
>> +			vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
>> +
>> +	vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1, pkt_mb[0]);
>> +	vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1, pkt_mb[1]);
>> +	vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1, pkt_mb[2]);
>> +	vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1, pkt_mb[3]);
>> +
>> +	if (hw->has_rx_offload) {
>> +		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
>> +			char *addr = (char *)rx_pkts[i]->buf_addr +
>> +				RTE_PKTMBUF_HEADROOM - head_size;
>> +			virtio_vec_rx_offload(rx_pkts[i],
>> +					(struct virtio_net_hdr *)addr);
>> +		}
>> +	}
>> +
>> +	virtio_update_batch_stats(&rxvq->stats, rx_pkts[0]->pkt_len,
>> +			rx_pkts[1]->pkt_len, rx_pkts[2]->pkt_len,
>> +			rx_pkts[3]->pkt_len);
>> +
>> +	vq->vq_free_cnt += PACKED_BATCH_SIZE;
>> +
>> +	vq->vq_used_cons_idx += PACKED_BATCH_SIZE;
>> +	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
>> +		vq->vq_used_cons_idx -= vq->vq_nentries;
>> +		vq->vq_packed.used_wrap_counter ^= 1;
>> +	}
>> +
>> +	return 0;
>> +}
>>
> 



Thread overview: 26+ messages
2020-09-11 12:09 [dpdk-dev] [RFC 0/3] Vectorize packed ring RX path with NEON Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 1/3] net/virtio: move AVX based Rx and Tx code to separate file Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 2/3] net/virtio: add vectorized packed ring Rx NEON path Joyce Kong
2020-09-11 12:09 ` [dpdk-dev] [RFC 3/3] net/virtio: add election for packed vector " Joyce Kong
2020-10-05  7:34 ` [dpdk-dev] [RFC 0/3] Vectorize packed ring RX path with NEON Maxime Coquelin
2020-10-08  6:54   ` Joyce Kong
2020-10-15  9:01   ` Ruifeng Wang
2020-10-15  9:02     ` Maxime Coquelin
2020-11-17 10:06 ` [dpdk-dev] [PATCH v1 0/4] Vectorize packed ring RX/TX " Joyce Kong
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 1/4] net/virtio: move AVX based Rx and Tx code to separate file Joyce Kong
2021-01-05 14:06     ` Maxime Coquelin
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 2/4] net/virtio: add vectorized packed ring Rx NEON path Joyce Kong
2021-01-05 14:16     ` Maxime Coquelin
2021-01-05 14:27       ` Maxime Coquelin [this message]
2021-01-07 10:39         ` Maxime Coquelin
2021-01-08  7:29           ` Joyce Kong
2021-01-08 17:02     ` Ferruh Yigit
2021-01-08 22:26       ` Honnappa Nagarahalli
2021-01-11 13:05         ` Aaron Conole
2021-01-11 10:45       ` Maxime Coquelin
2021-01-11 13:04       ` Aaron Conole
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 3/4] net/virtio: add vectorized packed ring Tx " Joyce Kong
2021-01-05 14:33     ` Maxime Coquelin
2020-11-17 10:06   ` [dpdk-dev] [PATCH v1 4/4] net/virtio: add election for packed vector " Joyce Kong
2021-01-05 14:42     ` Maxime Coquelin
2021-01-08  9:11   ` [dpdk-dev] [PATCH v1 0/4] Vectorize packed ring RX/TX path with NEON Maxime Coquelin
