From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: Wenzhuo Lu <wenzhuo.lu@intel.com>
Date: Mon, 25 Mar 2019 14:06:32 +0800
Message-Id: <1553493995-29803-6-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1553493995-29803-1-git-send-email-wenzhuo.lu@intel.com>
References: <1551340136-83843-1-git-send-email-wenzhuo.lu@intel.com>
 <1553493995-29803-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v6 5/8] net/ice: support Tx SSE vector
Content-Type: text/plain; charset="UTF-8"

Use SSE vector instructions in the Tx path of the ice PMD.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
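Note: the vector Tx path is only selected when every Tx queue passes
ice_tx_vec_queue_default(), i.e. none of the offloads in
ICE_NO_VECTOR_FLAGS is requested and tx_rs_thresh lies between
ICE_VPMD_TX_BURST and ICE_TX_MAX_FREE_BUF_SZ. The sketch below is not
part of the patch; it only illustrates an application-side queue setup
that is expected to satisfy those checks (the port id, ring size and
threshold values are illustrative assumptions, not requirements).

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative only: configure one Tx queue so it can take the vector
 * Tx path added by this patch.
 */
static int
setup_vec_capable_txq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf;

	memset(&txconf, 0, sizeof(txconf));
	/* No multi-seg/VLAN/checksum Tx offloads; any flag from
	 * ICE_NO_VECTOR_FLAGS would disable the vector path.
	 */
	txconf.offloads = 0;
	/* Assumed to lie within [ICE_VPMD_TX_BURST, ICE_TX_MAX_FREE_BUF_SZ]. */
	txconf.tx_rs_thresh = 32;
	txconf.tx_free_thresh = 32;

	/* 1024 descriptors and the port's NUMA socket are arbitrary choices. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 1024,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}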
 doc/guides/nics/features/ice_vec.ini  |   2 +
 drivers/net/ice/ice_rxtx.c            |  17 +++++
 drivers/net/ice/ice_rxtx.h            |   4 +
 drivers/net/ice/ice_rxtx_vec_common.h | 133 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_sse.c    | 135 ++++++++++++++++++++++++++++++++++
 5 files changed, 291 insertions(+)

diff --git a/doc/guides/nics/features/ice_vec.ini b/doc/guides/nics/features/ice_vec.ini
index 1a19788..173c8f2 100644
--- a/doc/guides/nics/features/ice_vec.ini
+++ b/doc/guides/nics/features/ice_vec.ini
@@ -12,6 +12,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
@@ -22,6 +23,7 @@ RSS reta update      = Y
 VLAN filter          = Y
 Packet type parsing  = Y
 Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats          = Y
 Extended stats       = Y
 FW version           = Y
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5409dd0..f9ecffa 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2332,6 +2332,23 @@ void __attribute__((cold))
 {
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+	struct ice_tx_queue *txq;
+	int i;
+
+	if (!ice_tx_vec_dev_check(dev)) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			(void)ice_txq_vec_setup(txq);
+		}
+		PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
+			    dev->data->port_id);
+		dev->tx_pkt_burst = ice_xmit_pkts_vec;
+		dev->tx_pkt_prepare = NULL;
+
+		return;
+	}
+#endif
 
 	if (ad->tx_simple_allowed) {
 		PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 6ef0a84..1dde4e7 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -170,9 +170,13 @@ void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
+int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_rxq_vec_setup(struct ice_rx_queue *rxq);
+int ice_txq_vec_setup(struct ice_tx_queue *txq);
 uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 				     uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			   uint16_t nb_pkts);
 #endif /* _ICE_RXTX_H_ */
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index d41232d..c5f0d56 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -71,6 +71,73 @@
 	return pkt_idx;
 }
 
+static __rte_always_inline int
+ice_tx_free_bufs(struct ice_tx_queue *txq)
+{
+	struct ice_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
+	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh - 1)
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							     (void *)free,
+							     nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
+static __rte_always_inline void
+ice_tx_backlog_entry(struct ice_tx_entry *txep,
+		     struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
 static inline void
 _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 {
@@ -106,6 +173,34 @@
 	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
 }
 
+static inline void
+_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
+{
+	uint16_t i;
+
+	if (unlikely(!txq || !txq->sw_ring)) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	/**
+	 * vPMD tx will not set sw_ring's mbuf to NULL after free,
+	 * so the remaining mbufs need to be freed more carefully.
+	 */
+	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+	if (txq->tx_tail < i) {
+		for (; i < txq->nb_tx_desc; i++) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+		i = 0;
+	}
+	for (; i < txq->tx_tail; i++) {
+		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+		txq->sw_ring[i].mbuf = NULL;
+	}
+}
+
 static inline int
 ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 {
@@ -142,6 +237,29 @@
 	return 0;
 }
 
+#define ICE_NO_VECTOR_FLAGS (			 \
+		DEV_TX_OFFLOAD_MULTI_SEGS |	 \
+		DEV_TX_OFFLOAD_VLAN_INSERT |	 \
+		DEV_TX_OFFLOAD_SCTP_CKSUM |	 \
+		DEV_TX_OFFLOAD_UDP_CKSUM |	 \
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+static inline int
+ice_tx_vec_queue_default(struct ice_tx_queue *txq)
+{
+	if (!txq)
+		return -1;
+
+	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
+	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
+		return -1;
+
+	return 0;
+}
+
 static inline int
 ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
@@ -157,4 +275,19 @@
 	return 0;
 }
 
+static inline int
+ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+	int i;
+	struct ice_tx_queue *txq;
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (ice_tx_vec_queue_default(txq))
+			return -1;
+	}
+
+	return 0;
+}
+
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 639dc86..3acfaaf 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -514,12 +514,131 @@
 			      &split_flags[i]);
 }
 
+static inline void
+ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
+	 uint64_t flags)
+{
+	uint64_t high_qw =
+		(ICE_TX_DESC_DTYPE_DATA |
+		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
+		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+
+	__m128i descriptor = _mm_set_epi64x(high_qw,
+					    pkt->buf_iova + pkt->data_off);
+	_mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts, uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		ice_vtx1(txdp, *pkt, flags);
+}
+
+static uint16_t
+ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+	volatile struct ice_tx_desc *txdp;
+	struct ice_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = ICE_TD_CMD;
+	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
+	int i;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ice_tx_free_bufs(txq);
+
+	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	nb_commit = nb_pkts;
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &txq->sw_ring[tx_id];
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		ice_tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			ice_vtx1(txdp, *tx_pkts, flags);
+
+		ice_vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &txq->tx_ring[tx_id];
+		txep = &txq->sw_ring[tx_id];
+	}
+
+	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+					 ICE_TXD_QW1_CMD_S);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+uint16_t
+ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		  uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
 static void __attribute__((cold))
 ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 {
 	_ice_rx_queue_release_mbufs_vec(rxq);
 }
 
+static void __attribute__((cold))
+ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
+{
+	_ice_tx_queue_release_mbufs_vec(txq);
+}
+
 int __attribute__((cold))
 ice_rxq_vec_setup(struct ice_rx_queue *rxq)
 {
@@ -531,7 +650,23 @@ int __attribute__((cold))
 }
 
 int __attribute__((cold))
+ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
+{
+	if (!txq)
+		return -1;
+
+	txq->tx_rel_mbufs = ice_tx_queue_release_mbufs_vec;
+	return 0;
+}
+
+int __attribute__((cold))
 ice_rx_vec_dev_check(struct rte_eth_dev *dev)
 {
 	return ice_rx_vec_dev_check_default(dev);
 }
+
+int __attribute__((cold))
+ice_tx_vec_dev_check(struct rte_eth_dev *dev)
+{
+	return ice_tx_vec_dev_check_default(dev);
+}
-- 
1.9.3
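
Usage note (not part of the patch): ice_xmit_pkts_vec() caps each internal
burst at tx_rs_thresh and stops early once the ring has no free descriptors,
so it can return fewer packets than requested. Callers are expected to retry
the remainder, as in this minimal sketch (port and queue ids are placeholders):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Keep calling rte_eth_tx_burst() until the whole batch has been handed
 * to the driver; each call may transmit only part of it.
 */
static void
send_all(uint16_t port_id, uint16_t queue_id,
	 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb_pkts - sent);
}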