From mboxrd@z Thu Jan 1 00:00:00 1970
From: "Wei Hu (Xavier)" <huwei013@chinasoftinc.com>
To: dev@dpdk.org
Date: Wed, 14 Oct 2020 18:01:14 +0800
Message-ID: <20201014100114.26596-3-huwei013@chinasoftinc.com>
X-Mailer: git-send-email 2.9.5
In-Reply-To: <20201014100114.26596-1-huwei013@chinasoftinc.com>
References: <20201014100114.26596-1-huwei013@chinasoftinc.com>
MIME-Version: 1.0
Content-Type: text/plain
Subject: [dpdk-dev] [PATCH 2/2] net/hns3: support SVE Tx
List-Id: DPDK patches and discussions

From: Chengwen Feng

This patch adds SVE vector instructions to optimize the Tx burst process.
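For reviewers unfamiliar with SVE: the new Tx fill and buffer-free routines below are built on the standard SVE predicated-loop idiom, in which svwhilelt builds a predicate covering the elements that remain and svptest_any ends the loop once that predicate is empty. A minimal sketch of the idiom, not taken from this patch (the function and variable names are illustrative only):

  #include <arm_sve.h>
  #include <stdint.h>

  /*
   * Copy 64-bit words with a predicated SVE loop. Each iteration handles
   * svcntd() lanes regardless of the hardware vector length; the final
   * partial iteration is masked off by the predicate.
   */
  static void
  copy_u64_sve(uint64_t *dst, const uint64_t *src, uint32_t n)
  {
          uint32_t i = 0;
          svbool_t pg = svwhilelt_b64_u32(i, n);

          do {
                  svuint64_t v = svld1_u64(pg, &src[i]);
                  svst1_u64(pg, &dst[i], v);
                  i += svcntd();
                  pg = svwhilelt_b64_u32(i, n);
          } while (svptest_any(svptrue_b64(), pg));
  }

hns3_tx_fill_hw_ring_sve() below follows the same shape, but instead of a plain copy it gathers buf_iova/data_off/data_len from the mbufs and scatters them into the Tx descriptors.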
Signed-off-by: Chengwen Feng
Signed-off-by: Wei Hu (Xavier)
Signed-off-by: Huisong Li
---
 drivers/net/hns3/hns3_rxtx.c         |  17 +++-
 drivers/net/hns3/hns3_rxtx.h         |   2 +
 drivers/net/hns3/hns3_rxtx_vec.h     |  34 +++++---
 drivers/net/hns3/hns3_rxtx_vec_sve.c | 159 +++++++++++++++++++++++++++++++++++
 4 files changed, 196 insertions(+), 16 deletions(-)

diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index e1ff173..d511908 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -2345,7 +2345,7 @@ hns3_rx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
 
 uint16_t __rte_weak
 hns3_recv_pkts_vec(__rte_unused void *tx_queue,
-                   __rte_unused struct rte_mbuf **tx_pkts,
+                   __rte_unused struct rte_mbuf **rx_pkts,
                    __rte_unused uint16_t nb_pkts)
 {
         return 0;
@@ -2353,7 +2353,7 @@ hns3_recv_pkts_vec(__rte_unused void *tx_queue,
 
 uint16_t __rte_weak
 hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue,
-                       __rte_unused struct rte_mbuf **tx_pkts,
+                       __rte_unused struct rte_mbuf **rx_pkts,
                        __rte_unused uint16_t nb_pkts)
 {
         return 0;
@@ -3615,6 +3615,14 @@ hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
         return 0;
 }
 
+uint16_t __rte_weak
+hns3_xmit_pkts_vec_sve(void __rte_unused * tx_queue,
+                       struct rte_mbuf __rte_unused **tx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+        return 0;
+}
+
 int
 hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                        struct rte_eth_burst_mode *mode)
@@ -3628,6 +3636,8 @@ hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
                 info = "Scalar";
         else if (pkt_burst == hns3_xmit_pkts_vec)
                 info = "Vector Neon";
+        else if (pkt_burst == hns3_xmit_pkts_vec_sve)
+                info = "Vector Sve";
 
         if (info == NULL)
                 return -EINVAL;
@@ -3645,7 +3655,8 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
 
         if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
                 *prep = NULL;
-                return hns3_xmit_pkts_vec;
+                return hns3_check_sve_support() ? hns3_xmit_pkts_vec_sve :
+                       hns3_xmit_pkts_vec;
         }
 
         if (hns->tx_simple_allowed &&
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index ba1b017..4be9c4a 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -644,6 +644,8 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts);
 uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                             uint16_t nb_pkts);
+uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                uint16_t nb_pkts);
 int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
                            __rte_unused uint16_t queue_id,
                            struct rte_eth_burst_mode *mode);
diff --git a/drivers/net/hns3/hns3_rxtx_vec.h b/drivers/net/hns3/hns3_rxtx_vec.h
index c6df36d..35d9903 100644
--- a/drivers/net/hns3/hns3_rxtx_vec.h
+++ b/drivers/net/hns3/hns3_rxtx_vec.h
@@ -9,26 +9,14 @@
 #include "hns3_ethdev.h"
 
 static inline void
-hns3_tx_free_buffers(struct hns3_tx_queue *txq)
+hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
 {
         struct rte_mbuf **free = txq->free;
         struct hns3_entry *tx_entry;
-        struct hns3_desc *tx_desc;
         struct rte_mbuf *m;
         int nb_free = 0;
         int i;
 
-        /*
-         * All mbufs can be released only when the VLD bits of all
-         * descriptors in a batch are cleared.
-         */
-        tx_desc = &txq->tx_ring[txq->next_to_clean];
-        for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
-                if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
-                    rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
-                        return;
-        }
-
         tx_entry = &txq->sw_ring[txq->next_to_clean];
         for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
                 m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
@@ -55,6 +43,26 @@ hns3_tx_free_buffers(struct hns3_tx_queue *txq)
                 txq->next_to_clean = 0;
 }
 
+static inline void
+hns3_tx_free_buffers(struct hns3_tx_queue *txq)
+{
+        struct hns3_desc *tx_desc;
+        int i;
+
+        /*
+         * All mbufs can be released only when the VLD bits of all
+         * descriptors in a batch are cleared.
+         */
+        tx_desc = &txq->tx_ring[txq->next_to_clean];
+        for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
+                if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
+                    rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
+                        return;
+        }
+
+        hns3_tx_bulk_free_buffers(txq);
+}
+
 static inline uint16_t
 hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts,
diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 9a81cb0..8c2c8f6 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -311,3 +311,162 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 
         return nb_rx;
 }
+
+static inline void
+hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq)
+{
+#define HNS3_SVE_CHECK_DESCS_PER_LOOP 8
+#define TX_VLD_U8_ZIP_INDEX svindex_u8(0, 4)
+        svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP);
+        svuint32_t vld, vld2;
+        svuint8_t vld_u8;
+        uint64_t vld_all;
+        struct hns3_desc *tx_desc;
+        int i;
+
+        /*
+         * All mbufs can be released only when the VLD bits of all
+         * descriptors in a batch are cleared.
+         */
+        /* do logical OR operation for all desc's valid field */
+        vld = svdup_n_u32(0);
+        tx_desc = &txq->tx_ring[txq->next_to_clean];
+        for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP,
+                        tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) {
+                vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc,
+                        svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
+                vld = svorr_u32_z(pg32, vld, vld2);
+        }
+        /* shift left and then right to get all valid bit */
+        vld = svlsl_n_u32_z(pg32, vld,
+                        HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B);
+        vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
+                svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1));
+        /* use tbl to compress 32bit-lane to 8bit-lane */
+        vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX);
+        /* dump compressed 64bit to variable */
+        svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8));
+        if (vld_all > 0)
+                return;
+
+        hns3_tx_bulk_free_buffers(txq);
+}
+
+static inline void
+hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
+                         struct rte_mbuf **pkts,
+                         uint16_t nb_pkts)
+{
+#define DATA_OFF_LEN_VAL_MASK 0xFFFF
+        struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
+        struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
+        const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
+                                   HNS3_UINT32_BIT;
+        svuint64_t base_addr, buf_iova, data_off, data_len, addr;
+        svuint64_t offsets = svindex_u64(0, BD_SIZE);
+        uint32_t i = 0;
+        svbool_t pg = svwhilelt_b64_u32(i, nb_pkts);
+
+        do {
+                base_addr = svld1_u64(pg, (uint64_t *)pkts);
+                /* calc mbuf's field buf_iova address */
+                buf_iova = svadd_n_u64_z(pg, base_addr,
+                        offsetof(struct rte_mbuf, buf_iova));
+                /* calc mbuf's field data_off address */
+                data_off = svadd_n_u64_z(pg, base_addr,
+                        offsetof(struct rte_mbuf, data_off));
+                /* calc mbuf's field data_len address */
+                data_len = svadd_n_u64_z(pg, base_addr,
+                        offsetof(struct rte_mbuf, data_len));
+                /* store mbuf to tx_entry */
+                svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
+                /* read pkts->buf_iova */
+                buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
+                /* read pkts->data_off's 64bit val */
+                data_off = svld1_gather_u64base_u64(pg, data_off);
+                /* read pkts->data_len's 64bit val */
+                data_len = svld1_gather_u64base_u64(pg, data_len);
+                /* zero data_off high 48bit by svand ops */
+                data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
+                /* zero data_len high 48bit by svand ops */
+                data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
+                /* calc mbuf data region iova addr */
+                addr = svadd_u64_z(pg, buf_iova, data_off);
+                /* shift due data_len's offset is 2byte of BD's second 8byte */
+                data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
+                /* save offset 0~7byte of every BD */
+                svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
+                        offsets, addr);
+                /* save offset 8~15byte of every BD */
+                svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
+                        offsets, data_len);
+                /* save offset 16~23byte of every BD */
+                svst1_scatter_u64offset_u64(pg,
+                        (uint64_t *)&txdp->tx.outer_vlan_tag,
+                        offsets, svdup_n_u64(0));
+                /* save offset 24~31byte of every BD */
+                svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.paylen,
+                        offsets, svdup_n_u64(valid_bit));
+
+                /* update index for next loop */
+                i += svcntd();
+                pkts += svcntd();
+                txdp += svcntd();
+                tx_entry += svcntd();
+                pg = svwhilelt_b64_u32(i, nb_pkts);
+        } while (svptest_any(svptrue_b64(), pg));
+}
+
+static uint16_t
+hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
+                              struct rte_mbuf **__restrict tx_pkts,
+                              uint16_t nb_pkts)
+{
+        struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+        uint16_t nb_tx = 0;
+
+        if (txq->tx_bd_ready < txq->tx_free_thresh)
+                hns3_tx_free_buffers_sve(txq);
+
+        nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
+        if (unlikely(nb_pkts == 0)) {
+                txq->queue_full_cnt++;
+                return 0;
+        }
+
+        if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
+                nb_tx = txq->nb_tx_desc - txq->next_to_use;
+                hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
+                txq->next_to_use = 0;
+        }
+
+        hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
+        txq->next_to_use += nb_pkts - nb_tx;
+
+        txq->tx_bd_ready -= nb_pkts;
+        hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
+
+        return nb_pkts;
+}
+
+uint16_t
+hns3_xmit_pkts_vec_sve(void *tx_queue,
+                       struct rte_mbuf **tx_pkts,
+                       uint16_t nb_pkts)
+{
+        struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+        uint16_t ret, new_burst;
+        uint16_t nb_tx = 0;
+
+        while (nb_pkts) {
+                new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+                ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
+                                                    new_burst);
+                nb_tx += ret;
+                nb_pkts -= ret;
+                if (ret < new_burst)
+                        break;
+        }
+
+        return nb_tx;
+}
--
2.9.5
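
A usage note that is not part of the patch: once hns3_get_tx_function() selects hns3_xmit_pkts_vec_sve, applications reach the SVE path through the unchanged ethdev burst API, so no caller-side changes are needed. A rough sketch under that assumption (the port id, queue id and retry loop are illustrative only):

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>

  /*
   * Transmit a batch of prepared mbufs on port 0, queue 0. The burst
   * function chosen by the driver (SVE, Neon or scalar) runs underneath
   * rte_eth_tx_burst(); a real application would bound the retries.
   */
  static void
  xmit_batch(struct rte_mbuf **pkts, uint16_t nb)
  {
          uint16_t sent = 0;

          while (sent < nb)
                  sent += rte_eth_tx_burst(0, 0, &pkts[sent], nb - sent);
  }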