DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx
@ 2020-10-14 10:01 Wei Hu (Xavier)
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx Wei Hu (Xavier)
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Wei Hu (Xavier) @ 2020-10-14 10:01 UTC (permalink / raw)
  To: dev; +Cc: xavier.huwei

This series adds support for SVE-based Tx/Rx burst operations.
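
For anyone checking which datapath a port ends up with, the selected
implementation is reported through rte_eth_rx_burst_mode_get(); the SVE
Rx path registers itself as "Vector Sve" (see patch 1/2). A minimal
sketch, assuming an already-configured port (the helper name and the
port/queue ids are illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Print which Rx burst implementation the PMD selected for queue 0;
     * with the SVE path active on hns3 this prints "Vector Sve".
     */
    static void
    print_rx_burst_mode(uint16_t port_id)
    {
            struct rte_eth_burst_mode mode;

            if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("port %u Rx burst mode: %s\n",
                           port_id, mode.info);
    }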

Chengwen Feng (1):
  net/hns3: support SVE Tx

Wei Hu (Xavier) (1):
  net/hns3: support SVE Rx

 drivers/net/hns3/hns3_rxtx.c         |  43 +++-
 drivers/net/hns3/hns3_rxtx.h         |   4 +
 drivers/net/hns3/hns3_rxtx_vec.h     |  34 ++-
 drivers/net/hns3/hns3_rxtx_vec_sve.c | 472 +++++++++++++++++++++++++++++++++++
 drivers/net/hns3/meson.build         |   5 +
 5 files changed, 541 insertions(+), 17 deletions(-)
 create mode 100644 drivers/net/hns3/hns3_rxtx_vec_sve.c

-- 
2.9.5



* [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx
  2020-10-14 10:01 [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx Wei Hu (Xavier)
@ 2020-10-14 10:01 ` Wei Hu (Xavier)
  2020-10-25 16:18   ` David Marchand
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 2/2] net/hns3: support SVE Tx Wei Hu (Xavier)
  2020-10-14 23:35 ` [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx Ferruh Yigit
  2 siblings, 1 reply; 7+ messages in thread
From: Wei Hu (Xavier) @ 2020-10-14 10:01 UTC (permalink / raw)
  To: dev; +Cc: xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch adds SVE vector instructions to optimize the Rx burst process.
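
The core technique is SVE gather loads: with a 32-lane predicate, one
intrinsic pulls the same 32-bit field out of eight consecutive 32-byte
descriptors at once. A minimal standalone sketch of that pattern (the
descriptor-layout constants mirror this patch; the helper name is
illustrative):

    #include <arm_sve.h>
    #include <stdint.h>

    #define BD_SIZE                 32  /* each Rx descriptor is 32 bytes */
    #define BD_FIELD_VALID_OFFSET   28  /* byte offset of the valid word */

    /* Gather the 32-bit "valid" word from 8 consecutive descriptors in
     * one load: svindex_u32() generates the byte offsets {28, 60, 92, ...}.
     */
    static inline svuint32_t
    gather_valid_words(const void *bd_ring)
    {
            svbool_t pg = svwhilelt_b32(0, 8);      /* 8 active lanes */

            return svld1_gather_u32offset_u32(pg, (const uint32_t *)bd_ring,
                            svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
    }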

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c         |  28 +++-
 drivers/net/hns3/hns3_rxtx.h         |   2 +
 drivers/net/hns3/hns3_rxtx_vec_sve.c | 313 +++++++++++++++++++++++++++++++++++
 drivers/net/hns3/meson.build         |   5 +
 4 files changed, 346 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/hns3/hns3_rxtx_vec_sve.c

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 60f110c..e1ff173 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -23,6 +23,9 @@
 #include <rte_net.h>
 #include <rte_malloc.h>
 #include <rte_pci.h>
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+#include <rte_cpuflags.h>
+#endif
 
 #include "hns3_ethdev.h"
 #include "hns3_rxtx.h"
@@ -1841,7 +1844,8 @@ hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 
 	if (dev->rx_pkt_burst == hns3_recv_pkts ||
 	    dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
-	    dev->rx_pkt_burst == hns3_recv_pkts_vec)
+	    dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+	    dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
 		return ptypes;
 
 	return NULL;
@@ -2347,6 +2351,14 @@ hns3_recv_pkts_vec(__rte_unused void *tx_queue,
 	return 0;
 }
 
+uint16_t __rte_weak
+hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
+		       __rte_unused struct rte_mbuf **tx_pkts,
+		       __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
 int
 hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		       struct rte_eth_burst_mode *mode)
@@ -2358,6 +2370,7 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		{ hns3_recv_pkts,		"Scalar" },
 		{ hns3_recv_scattered_pkts,	"Scalar Scattered" },
 		{ hns3_recv_pkts_vec,		"Vector Neon" },
+		{ hns3_recv_pkts_vec_sve,	"Vector Sve" },
 	};
 
 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
@@ -2376,6 +2389,16 @@ hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 	return ret;
 }
 
+static bool
+hns3_check_sve_support(void)
+{
+#if defined(RTE_ARCH_ARM64) && defined(CC_SVE_SUPPORT)
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
+		return true;
+#endif
+	return false;
+}
+
 static eth_rx_burst_t
 hns3_get_rx_function(struct rte_eth_dev *dev)
 {
@@ -2383,7 +2406,8 @@ hns3_get_rx_function(struct rte_eth_dev *dev)
 	uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
 
 	if (hns->rx_vec_allowed && hns3_rx_check_vec_support(dev) == 0)
-		return hns3_recv_pkts_vec;
+		return hns3_check_sve_support() ? hns3_recv_pkts_vec_sve :
+		       hns3_recv_pkts_vec;
 
 	if (hns->rx_simple_allowed && !dev->data->scattered_rx &&
 	    (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0)
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index abc7c54..ba1b017 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -630,6 +630,8 @@ uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
 uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts);
+uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts);
 int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
 			   __rte_unused uint16_t queue_id,
 			   struct rte_eth_burst_mode *mode);
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
new file mode 100644
index 0000000..9a81cb0
--- /dev/null
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Hisilicon Limited.
+ */
+
+#include <arm_sve.h>
+#include <rte_io.h>
+#include <rte_ethdev_driver.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_rxtx.h"
+#include "hns3_rxtx_vec.h"
+
+#define PG16_128BIT		svwhilelt_b16(0, 8)
+#define PG16_256BIT		svwhilelt_b16(0, 16)
+#define PG32_256BIT		svwhilelt_b32(0, 8)
+#define PG64_64BIT		svwhilelt_b64(0, 1)
+#define PG64_128BIT		svwhilelt_b64(0, 2)
+#define PG64_256BIT		svwhilelt_b64(0, 4)
+#define PG64_ALLBIT		svptrue_b64()
+
+#define BD_SIZE			32
+#define BD_FIELD_ADDR_OFFSET	0
+#define BD_FIELD_L234_OFFSET	8
+#define BD_FIELD_XLEN_OFFSET	12
+#define BD_FIELD_RSS_OFFSET	16
+#define BD_FIELD_OL_OFFSET	24
+#define BD_FIELD_VALID_OFFSET	28
+
+typedef struct {
+	uint32_t l234_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
+	uint32_t ol_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
+	uint32_t bd_base_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
+} HNS3_SVE_KEY_FIELD_S;
+
+static inline uint32_t
+hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
+			  struct rte_mbuf **rx_pkts,
+			  HNS3_SVE_KEY_FIELD_S *key,
+			  uint32_t bd_vld_num)
+{
+	uint32_t retcode = 0;
+	uint32_t cksum_err;
+	int ret, i;
+
+	for (i = 0; i < (int)bd_vld_num; i++) {
+		/* init the last 64-bit word of rte_mbuf.rearm_data */
+		rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;
+
+		ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
+					 key->l234_info[i], &cksum_err);
+		if (unlikely(ret)) {
+			retcode |= 1u << i;
+			continue;
+		}
+
+		rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
+					key->l234_info[i], key->ol_info[i]);
+		if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
+			hns3_rx_set_cksum_flag(rx_pkts[i],
+					rx_pkts[i]->packet_type, cksum_err);
+	}
+
+	return retcode;
+}
+
+static inline void
+hns3_rx_prefetch_mbuf_sve(struct hns3_entry *sw_ring)
+{
+	svuint64_t prf1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[0]);
+	svuint64_t prf2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[4]);
+	svprfd_gather_u64base(PG64_256BIT, prf1st, SV_PLDL1KEEP);
+	svprfd_gather_u64base(PG64_256BIT, prf2st, SV_PLDL1KEEP);
+}
+
+static inline uint16_t
+hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
+			struct rte_mbuf **__restrict rx_pkts,
+			uint16_t nb_pkts,
+			uint64_t *bd_err_mask)
+{
+#define XLEN_ADJUST_LEN		32
+#define RSS_ADJUST_LEN		16
+#define GEN_VLD_U8_ZIP_INDEX	svindex_s8(28, -4)
+	uint16_t rx_id = rxq->next_to_use;
+	struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
+	struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
+	struct hns3_desc *rxdp2;
+	HNS3_SVE_KEY_FIELD_S key_field;
+	uint64_t bd_valid_num;
+	uint32_t parse_retcode;
+	uint16_t nb_rx = 0;
+	int pos, offset;
+
+	uint16_t xlen_adjust[XLEN_ADJUST_LEN] = {
+		0,  0xffff, 1,  0xffff,    /* 1st mbuf: pkt_len and data_len */
+		2,  0xffff, 3,  0xffff,    /* 2nd mbuf: pkt_len and data_len */
+		4,  0xffff, 5,  0xffff,    /* 3rd mbuf: pkt_len and data_len */
+		6,  0xffff, 7,  0xffff,    /* 4th mbuf: pkt_len and data_len */
+		8,  0xffff, 9,  0xffff,    /* 5th mbuf: pkt_len and data_len */
+		10, 0xffff, 11, 0xffff,    /* 6th mbuf: pkt_len and data_len */
+		12, 0xffff, 13, 0xffff,    /* 7th mbuf: pkt_len and data_len */
+		14, 0xffff, 15, 0xffff,    /* 8th mbuf: pkt_len and data_len */
+	};
+
+	uint32_t rss_adjust[RSS_ADJUST_LEN] = {
+		0, 0xffff,        /* 1st mbuf: rss */
+		1, 0xffff,        /* 2nd mbuf: rss */
+		2, 0xffff,        /* 3rd mbuf: rss */
+		3, 0xffff,        /* 4th mbuf: rss */
+		4, 0xffff,        /* 5th mbuf: rss */
+		5, 0xffff,        /* 6th mbuf: rss */
+		6, 0xffff,        /* 7th mbuf: rss */
+		7, 0xffff,        /* 8th mbuf: rss */
+	};
+
+	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
+	svuint16_t xlen_tbl1 = svld1_u16(PG16_256BIT, xlen_adjust);
+	svuint16_t xlen_tbl2 = svld1_u16(PG16_256BIT, &xlen_adjust[16]);
+	svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
+	svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);
+
+	for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
+				     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
+		svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
+		svuint64_t xlen1st, xlen2st, rss1st, rss2st;
+		svuint32_t l234, ol, vld, vld2, xlen, rss;
+		svuint8_t  vld_u8;
+
+		/* calc how many BDs are valid: part 1 */
+		vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp,
+			svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
+		vld2 = svlsl_n_u32_z(pg32, vld,
+				    HNS3_UINT32_BIT - 1 - HNS3_RXD_VLD_B);
+		vld2 = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
+			svreinterpret_s32_u32(vld2), HNS3_UINT32_BIT - 1));
+
+		/* load 4 mbuf pointers */
+		mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]);
+
+		/* calc how many BDs are valid: part 2 */
+		vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld2),
+				  svreinterpret_u8_s8(GEN_VLD_U8_ZIP_INDEX));
+		vld_clz = svnot_u64_z(PG64_64BIT, svreinterpret_u64_u8(vld_u8));
+		vld_clz = svclz_u64_z(PG64_64BIT, vld_clz);
+		svst1_u64(PG64_64BIT, &bd_valid_num, vld_clz);
+		bd_valid_num /= HNS3_UINT8_BIT;
+
+		/* load 4 more mbuf pointers */
+		mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]);
+
+		/* use offset to control the ordering of the data loads below */
+		offset = rxq->offset_table[bd_valid_num];
+		rxdp2 = rxdp + offset;
+
+		/* store 4 mbuf pointers into rx_pkts */
+		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st);
+
+		/* load key field to vector reg */
+		l234 = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
+				svindex_u32(BD_FIELD_L234_OFFSET, BD_SIZE));
+		ol = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
+				svindex_u32(BD_FIELD_OL_OFFSET, BD_SIZE));
+
+		/* store 4 more mbuf pointers into rx_pkts */
+		svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st);
+
+		/* load data_len, pkt_len and rss_hash */
+		xlen = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
+				svindex_u32(BD_FIELD_XLEN_OFFSET, BD_SIZE));
+		rss = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
+				svindex_u32(BD_FIELD_RSS_OFFSET, BD_SIZE));
+
+		/* store key fields to the stash buffer */
+		svst1_u32(pg32, (uint32_t *)key_field.l234_info, l234);
+		svst1_u32(pg32, (uint32_t *)key_field.bd_base_info, vld);
+		svst1_u32(pg32, (uint32_t *)key_field.ol_info, ol);
+
+		/* subtract crc_len from pkt_len and data_len */
+		xlen = svreinterpret_u32_u16(svsub_n_u16_z(PG16_256BIT,
+			svreinterpret_u16_u32(xlen), rxq->crc_len));
+
+		/* init mbuf_initializer */
+		mbuf_init = svdup_n_u64(rxq->mbuf_initializer);
+
+		/* extract data_len, pkt_len and rss from xlen and rss */
+		xlen1st = svreinterpret_u64_u16(
+			svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl1));
+		xlen2st = svreinterpret_u64_u16(
+			svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl2));
+		rss1st = svreinterpret_u64_u32(
+			svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl1));
+		rss2st = svreinterpret_u64_u32(
+			svtbl_u32(svreinterpret_u32_u32(rss), rss_tbl2));
+
+		/* save mbuf_initializer */
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
+			offsetof(struct rte_mbuf, rearm_data), mbuf_init);
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
+			offsetof(struct rte_mbuf, rearm_data), mbuf_init);
+
+		/* save data_len, pkt_len and rss */
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
+			offsetof(struct rte_mbuf, pkt_len), xlen1st);
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
+			offsetof(struct rte_mbuf, hash.rss), rss1st);
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
+			offsetof(struct rte_mbuf, pkt_len), xlen2st);
+		svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
+			offsetof(struct rte_mbuf, hash.rss), rss2st);
+
+		rte_prefetch_non_temporal(rxdp +
+					  HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
+
+		parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos],
+					&key_field, bd_valid_num);
+		if (unlikely(parse_retcode))
+			(*bd_err_mask) |= ((uint64_t)parse_retcode) << pos;
+
+		hns3_rx_prefetch_mbuf_sve(&sw_ring[pos +
+					HNS3_SVE_DEFAULT_DESCS_PER_LOOP]);
+
+		nb_rx += bd_valid_num;
+		if (unlikely(bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP))
+			break;
+	}
+
+	rxq->rx_rearm_nb += nb_rx;
+	rxq->next_to_use += nb_rx;
+	if (rxq->next_to_use >= rxq->nb_rx_desc)
+		rxq->next_to_use = 0;
+
+	return nb_rx;
+}
+
+static inline void
+hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq)
+{
+#define REARM_LOOP_STEP_NUM	4
+	struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
+	struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
+	struct hns3_entry *rxep_tmp = rxep;
+	int i;
+
+	if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+					  HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+		return;
+	}
+
+	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
+		rxep_tmp += REARM_LOOP_STEP_NUM) {
+		svuint64_t prf = svld1_u64(PG64_256BIT, (uint64_t *)rxep_tmp);
+		svprfd_gather_u64base(PG64_256BIT, prf, SV_PLDL1STRM);
+	}
+
+	for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
+		rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
+		uint64_t iova[REARM_LOOP_STEP_NUM];
+		iova[0] = rxep[0].mbuf->buf_iova;
+		iova[1] = rxep[1].mbuf->buf_iova;
+		iova[2] = rxep[2].mbuf->buf_iova;
+		iova[3] = rxep[3].mbuf->buf_iova;
+		svuint64_t siova = svld1_u64(PG64_256BIT, iova);
+		siova = svadd_n_u64_z(PG64_256BIT, siova, RTE_PKTMBUF_HEADROOM);
+		svuint64_t ol_base = svdup_n_u64(0);
+		svst1_scatter_u64offset_u64(PG64_256BIT,
+			(uint64_t *)&rxdp[0].addr,
+			svindex_u64(BD_FIELD_ADDR_OFFSET, BD_SIZE), siova);
+		svst1_scatter_u64offset_u64(PG64_256BIT,
+			(uint64_t *)&rxdp[0].addr,
+			svindex_u64(BD_FIELD_OL_OFFSET, BD_SIZE), ol_base);
+	}
+
+	rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
+	if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
+		rxq->rx_rearm_start = 0;
+
+	rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;
+
+	hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
+}
+
+uint16_t
+hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
+		       struct rte_mbuf **__restrict rx_pkts,
+		       uint16_t nb_pkts)
+{
+	struct hns3_rx_queue *rxq = rx_queue;
+	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
+	uint64_t bd_err_mask;  /* bit mask indicating which pkts are in error */
+	uint16_t nb_rx;
+
+	rte_prefetch_non_temporal(rxdp);
+
+	nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
+
+	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
+		hns3_rxq_rearm_mbuf_sve(rxq);
+
+	if (unlikely(!(rxdp->rx.bd_base_info &
+			rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
+		return 0;
+
+	hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);
+
+	bd_err_mask = 0;
+	nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
+	if (unlikely(bd_err_mask))
+		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);
+
+	return nb_rx;
+}
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index bf69ad4..ca0f21a 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -31,4 +31,9 @@ deps += ['hash']
 
 if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
 	sources += files('hns3_rxtx_vec.c')
+	if (dpdk_conf.has('RTE_MACHINE_CPUFLAG_SVE'))
+		dpdk_conf.set('RTE_LIBRTE_HNS3_INC_VECTOR_SVE', 1)
+		cflags = ['-DCC_SVE_SUPPORT']
+		sources += files('hns3_rxtx_vec_sve.c')
+	endif
 endif
-- 
2.9.5



* [dpdk-dev] [PATCH 2/2] net/hns3: support SVE Tx
  2020-10-14 10:01 [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx Wei Hu (Xavier)
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx Wei Hu (Xavier)
@ 2020-10-14 10:01 ` Wei Hu (Xavier)
  2020-10-14 23:35 ` [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx Ferruh Yigit
  2 siblings, 0 replies; 7+ messages in thread
From: Wei Hu (Xavier) @ 2020-10-14 10:01 UTC (permalink / raw)
  To: dev; +Cc: xavier.huwei

From: Chengwen Feng <fengchengwen@huawei.com>

This patch adds SVE vector instructions to optimize the Tx burst process.
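
The fill loop below is written in the vector-length-agnostic SVE style:
a svwhilelt predicate masks off the tail iteration and svcntd() advances
by however many 64-bit lanes the hardware provides. A minimal sketch of
that loop shape (a plain copy loop, just to show the control flow; the
function name is illustrative):

    #include <arm_sve.h>
    #include <stdint.h>

    /* Copy n 64-bit words with the predicate-driven loop used by
     * hns3_tx_fill_hw_ring_sve(): the final partial vector is handled
     * by the predicate itself, so no scalar tail loop is needed.
     */
    static void
    copy_u64_vla(uint64_t *dst, const uint64_t *src, uint32_t n)
    {
            uint32_t i = 0;
            svbool_t pg = svwhilelt_b64_u32(i, n);

            do {
                    svuint64_t v = svld1_u64(pg, &src[i]);
                    svst1_u64(pg, &dst[i], v);
                    i += svcntd();          /* 64-bit lanes per vector */
                    pg = svwhilelt_b64_u32(i, n);
            } while (svptest_any(svptrue_b64(), pg));
    }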

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
 drivers/net/hns3/hns3_rxtx.c         |  17 +++-
 drivers/net/hns3/hns3_rxtx.h         |   2 +
 drivers/net/hns3/hns3_rxtx_vec.h     |  34 +++++---
 drivers/net/hns3/hns3_rxtx_vec_sve.c | 159 +++++++++++++++++++++++++++++++++++
 4 files changed, 196 insertions(+), 16 deletions(-)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index e1ff173..d511908 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2345,7 +2345,7 @@ hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
 
 uint16_t __rte_weak
 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
-		   __rte_unused struct rte_mbuf **tx_pkts,
+		   __rte_unused struct rte_mbuf **rx_pkts,
 		   __rte_unused uint16_t nb_pkts)
 {
 	return 0;
@@ -2353,7 +2353,7 @@ hns3_recv_pkts_vec(__rte_unused void *tx_queue,
 
 uint16_t __rte_weak
 hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
-		       __rte_unused struct rte_mbuf **tx_pkts,
+		       __rte_unused struct rte_mbuf **rx_pkts,
 		       __rte_unused uint16_t nb_pkts)
 {
 	return 0;
@@ -3615,6 +3615,14 @@ hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
 	return 0;
 }
 
+uint16_t __rte_weak
+hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
+		       struct rte_mbuf __rte_unused **tx_pkts,
+		       uint16_t __rte_unused nb_pkts)
+{
+	return 0;
+}
+
 int
 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		       struct rte_eth_burst_mode *mode)
@@ -3628,6 +3636,8 @@ hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 		info = "Scalar";
 	else if (pkt_burst == hns3_xmit_pkts_vec)
 		info = "Vector Neon";
+	else if (pkt_burst == hns3_xmit_pkts_vec_sve)
+		info = "Vector Sve";
 
 	if (info == NULL)
 		return -EINVAL;
@@ -3645,7 +3655,8 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 
 	if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
 		*prep = NULL;
-		return hns3_xmit_pkts_vec;
+		return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
+			hns3_xmit_pkts_vec;
 	}
 
 	if (hns->tx_simple_allowed &&
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index ba1b017..4be9c4a 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -644,6 +644,8 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
 uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 							uint16_t nb_pkts);
+uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
+				uint16_t nb_pkts);
 int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
 			   __rte_unused uint16_t queue_id,
 			   struct rte_eth_burst_mode *mode);
diff --git a/drivers/net/hns3/hns3_rxtx_vec.h b/drivers/net/hns3/hns3_rxtx_vec.h
index c6df36d..35d9903 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.h
+++ b/drivers/net/hns3/hns3_rxtx_vec.h
@@ -9,26 +9,14 @@
 #include "hns3_ethdev.h"
 
 static inline void
-hns3_tx_free_buffers(struct hns3_tx_queue *txq)
+hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
 {
 	struct rte_mbuf **free = txq->free;
 	struct hns3_entry *tx_entry;
-	struct hns3_desc *tx_desc;
 	struct rte_mbuf *m;
 	int nb_free = 0;
 	int i;
 
-	/*
-	 * All mbufs can be released only when the VLD bits of all
-	 * descriptors in a batch are cleared.
-	 */
-	tx_desc = &txq->tx_ring[txq->next_to_clean];
-	for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
-		if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
-				rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
-			return;
-	}
-
 	tx_entry = &txq->sw_ring[txq->next_to_clean];
 	for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
 		m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
@@ -55,6 +43,26 @@ hns3_tx_free_buffers(struct hns3_tx_queue *txq)
 		txq->next_to_clean = 0;
 }
 
+static inline void
+hns3_tx_free_buffers(struct hns3_tx_queue *txq)
+{
+	struct hns3_desc *tx_desc;
+	int i;
+
+	/*
+	 * All mbufs can be released only when the VLD bits of all
+	 * descriptors in a batch are cleared.
+	 */
+	tx_desc = &txq->tx_ring[txq->next_to_clean];
+	for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
+		if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
+				rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
+			return;
+	}
+
+	hns3_tx_bulk_free_buffers(txq);
+}
+
 static inline uint16_t
 hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts,
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 9a81cb0..8c2c8f6 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -311,3 +311,162 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 
 	return nb_rx;
 }
+
+static inline void
+hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq)
+{
+#define HNS3_SVE_CHECK_DESCS_PER_LOOP	8
+#define TX_VLD_U8_ZIP_INDEX		svindex_u8(0, 4)
+	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP);
+	svuint32_t vld, vld2;
+	svuint8_t vld_u8;
+	uint64_t vld_all;
+	struct hns3_desc *tx_desc;
+	int i;
+
+	/*
+	 * All mbufs can be released only when the VLD bits of all
+	 * descriptors in a batch are cleared.
+	 */
+	/* OR together every descriptor's valid field */
+	vld = svdup_n_u32(0);
+	tx_desc = &txq->tx_ring[txq->next_to_clean];
+	for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP,
+				tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) {
+		vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc,
+				svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
+		vld = svorr_u32_z(pg32, vld, vld2);
+	}
+	/* shift left then right to isolate the valid bit */
+	vld = svlsl_n_u32_z(pg32, vld,
+			    HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B);
+	vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
+		svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1));
+	/* use tbl to compress 32-bit lanes into 8-bit lanes */
+	vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX);
+	/* dump the compressed 64-bit value into a variable */
+	svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8));
+	if (vld_all > 0)
+		return;
+
+	hns3_tx_bulk_free_buffers(txq);
+}
+
+static inline void
+hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
+			 struct rte_mbuf **pkts,
+			 uint16_t nb_pkts)
+{
+#define DATA_OFF_LEN_VAL_MASK	0xFFFF
+	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
+	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
+	const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
+				   HNS3_UINT32_BIT;
+	svuint64_t base_addr, buf_iova, data_off, data_len, addr;
+	svuint64_t offsets = svindex_u64(0, BD_SIZE);
+	uint32_t i = 0;
+	svbool_t pg = svwhilelt_b64_u32(i, nb_pkts);
+
+	do {
+		base_addr = svld1_u64(pg, (uint64_t *)pkts);
+		/* calc mbuf's field buf_iova address */
+		buf_iova = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, buf_iova));
+		/* calc mbuf's field data_off address */
+		data_off = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, data_off));
+		/* calc mbuf's field data_len address */
+		data_len = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, data_len));
+		/* store mbuf to tx_entry */
+		svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
+		/* read pkts->buf_iova */
+		buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
+		/* read pkts->data_off's 64-bit val */
+		data_off = svld1_gather_u64base_u64(pg, data_off);
+		/* read pkts->data_len's 64-bit val */
+		data_len = svld1_gather_u64base_u64(pg, data_len);
+		/* zero the high 48 bits of data_off via svand */
+		data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
+		/* zero the high 48 bits of data_len via svand */
+		data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
+		/* calc the iova addr of the mbuf data region */
+		addr = svadd_u64_z(pg, buf_iova, data_off);
+		/* shift: data_len lives at byte 2 of the BD's second 8 bytes */
+		data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
+		/* save offset 0~7byte of every BD */
+		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
+					    offsets, addr);
+		/* save offset 8~15byte of every BD */
+		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
+					    offsets, data_len);
+		/* save offset 16~23byte of every BD */
+		svst1_scatter_u64offset_u64(pg,
+				(uint64_t *)&txdp->tx.outer_vlan_tag,
+				offsets, svdup_n_u64(0));
+		/* save offset 24~31byte of every BD */
+		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen,
+					    offsets, svdup_n_u64(valid_bit));
+
+		/* update index for next loop */
+		i += svcntd();
+		pkts += svcntd();
+		txdp += svcntd();
+		tx_entry += svcntd();
+		pg = svwhilelt_b64_u32(i, nb_pkts);
+	} while (svptest_any(svptrue_b64(), pg));
+}
+
+static uint16_t
+hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
+			      struct rte_mbuf **__restrict tx_pkts,
+			      uint16_t nb_pkts)
+{
+	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+	uint16_t nb_tx = 0;
+
+	if (txq->tx_bd_ready < txq->tx_free_thresh)
+		hns3_tx_free_buffers_sve(txq);
+
+	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
+	if (unlikely(nb_pkts == 0)) {
+		txq->queue_full_cnt++;
+		return 0;
+	}
+
+	if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
+		nb_tx = txq->nb_tx_desc - txq->next_to_use;
+		hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
+		txq->next_to_use = 0;
+	}
+
+	hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
+	txq->next_to_use += nb_pkts - nb_tx;
+
+	txq->tx_bd_ready -= nb_pkts;
+	hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
+
+	return nb_pkts;
+}
+
+uint16_t
+hns3_xmit_pkts_vec_sve(void *tx_queue,
+		       struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+	uint16_t ret, new_burst;
+	uint16_t nb_tx = 0;
+
+	while (nb_pkts) {
+		new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
+						    new_burst);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < new_burst)
+			break;
+	}
+
+	return nb_tx;
+}
-- 
2.9.5



* Re: [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx
  2020-10-14 10:01 [dpdk-dev] [PATCH 0/2] net/hns3: support SVE Tx/Rx Wei Hu (Xavier)
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx Wei Hu (Xavier)
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 2/2] net/hns3: support SVE Tx Wei Hu (Xavier)
@ 2020-10-14 23:35 ` Ferruh Yigit
  2 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2020-10-14 23:35 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 10/14/2020 11:01 AM, Wei Hu (Xavier) wrote:
> This series adds support for SVE-based Tx/Rx burst operations.
> 
> Chengwen Feng (1):
>    net/hns3: support SVE Tx
> 
> Wei Hu (Xavier) (1):
>    net/hns3: support SVE Rx
> 

Series applied to dpdk-next-net/main, thanks.



* Re: [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx
  2020-10-14 10:01 ` [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx Wei Hu (Xavier)
@ 2020-10-25 16:18   ` David Marchand
  2020-10-27  3:36     ` oulijun
  2020-10-27  6:54     ` oulijun
  0 siblings, 2 replies; 7+ messages in thread
From: David Marchand @ 2020-10-25 16:18 UTC (permalink / raw)
  To: Wei Hu (Xavier); +Cc: dev, Yigit, Ferruh, Thomas Monjalon

Hello Wei Hu,

> diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
> index bf69ad4..ca0f21a 100644
> --- a/drivers/net/hns3/meson.build
> +++ b/drivers/net/hns3/meson.build
> @@ -31,4 +31,9 @@ deps += ['hash']
>
>  if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
>         sources += files('hns3_rxtx_vec.c')
> +       if (dpdk_conf.has('RTE_MACHINE_CPUFLAG_SVE'))
> +               dpdk_conf.set('RTE_LIBRTE_HNS3_INC_VECTOR_SVE', 1)
> +               cflags = ['-DCC_SVE_SUPPORT']
> +               sources += files('hns3_rxtx_vec_sve.c')
> +       endif
>  endif

This patch is already merged in main, but RTE_MACHINE_CPUFLAG_* have
been removed, see https://git.dpdk.org/dpdk/commit/?id=84fb33fec1
I guess SVE support is not working because of this.

Please use compiler flags to check for support.
If you need an example of how to do this, you can probably look at
what has been done for Intel vector stuff.

Besides, RTE_LIBRTE_HNS3_INC_VECTOR_SVE is not used anywhere and can be removed.
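
For reference, a compiler-based probe along those lines could look
roughly like this (a sketch only; cc.check_header() is standard meson,
but the exact shape of the eventual upstream fix may differ, and a
'-march=...+sve' flag may also be needed depending on the toolchain):

    if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
            sources += files('hns3_rxtx_vec.c')
            # probe the SVE ACLE header instead of RTE_MACHINE_CPUFLAG_*
            if cc.check_header('arm_sve.h')
                    cflags += ['-DCC_SVE_SUPPORT']
                    sources += files('hns3_rxtx_vec_sve.c')
            endif
    endif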


-- 
David Marchand



* Re: [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx
  2020-10-25 16:18   ` David Marchand
@ 2020-10-27  3:36     ` oulijun
  2020-10-27  6:54     ` oulijun
  1 sibling, 0 replies; 7+ messages in thread
From: oulijun @ 2020-10-27  3:36 UTC (permalink / raw)
  To: dev



On 2020/10/26 0:18, David Marchand wrote:
> Hello Wei Hu,
> 
>> diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
>> index bf69ad4..ca0f21a 100644
>> --- a/drivers/net/hns3/meson.build
>> +++ b/drivers/net/hns3/meson.build
>> @@ -31,4 +31,9 @@ deps += ['hash']
>>
>>   if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
>>          sources += files('hns3_rxtx_vec.c')
>> +       if (dpdk_conf.has('RTE_MACHINE_CPUFLAG_SVE'))
>> +               dpdk_conf.set('RTE_LIBRTE_HNS3_INC_VECTOR_SVE', 1)
>> +               cflags = ['-DCC_SVE_SUPPORT']
>> +               sources += files('hns3_rxtx_vec_sve.c')
>> +       endif
>>   endif
> 
> This patch is already merged in main, but RTE_MACHINE_CPUFLAG_* have
> been removed, see https://git.dpdk.org/dpdk/commit/?id=84fb33fec1
> I guess SVE support is not working because of this.
> 
> Please use compiler flags to check for support.
> If you need an example on how to do this, you can probably look at
> what has been done for Intel vector stuff.
> 
> Besides, RTE_LIBRTE_HNS3_INC_VECTOR_SVE is not used anywhere and can be removed.
> 
Hi, David
   Thanks for your review. Wei Hu has left and handed over this work.
I will review and upstream his patches as the new developer. I will
collect your comments first and address them later.

Thanks
Lijun Ou
> 


* Re: [dpdk-dev] [PATCH 1/2] net/hns3: support SVE Rx
  2020-10-25 16:18   ` David Marchand
  2020-10-27  3:36     ` oulijun
@ 2020-10-27  6:54     ` oulijun
  1 sibling, 0 replies; 7+ messages in thread
From: oulijun @ 2020-10-27  6:54 UTC (permalink / raw)
  To: dev



On 2020/10/26 0:18, David Marchand wrote:
> Hello Wei Hu,
> 
>> diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
>> index bf69ad4..ca0f21a 100644
>> --- a/drivers/net/hns3/meson.build
>> +++ b/drivers/net/hns3/meson.build
>> @@ -31,4 +31,9 @@ deps += ['hash']
>>
>>   if arch_subdir == 'arm' and dpdk_conf.get('RTE_ARCH_64')
>>          sources += files('hns3_rxtx_vec.c')
>> +       if (dpdk_conf.has('RTE_MACHINE_CPUFLAG_SVE'))
>> +               dpdk_conf.set('RTE_LIBRTE_HNS3_INC_VECTOR_SVE', 1)
>> +               cflags = ['-DCC_SVE_SUPPORT']
>> +               sources += files('hns3_rxtx_vec_sve.c')
>> +       endif
>>   endif
> 
> This patch is already merged in main, but RTE_MACHINE_CPUFLAG_* have
> been removed, see https://git.dpdk.org/dpdk/commit/?id=84fb33fec1
> I guess SVE support is not working because of this.
> 
> Please use compiler flags to check for support.
> If you need an example on how to do this, you can probably look at
> what has been done for Intel vector stuff.
> 
> Besides, RTE_LIBRTE_HNS3_INC_VECTOR_SVE is not used anywhere and can be removed.
> 
Hi, David Marchand
   I have checked this. I will send a fix patch to enable it.

Thanks
Lijun Ou
> 

