From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Thu, 21 Oct 2021 17:50:01 +0800
Message-Id: <20211021095023.18288-5-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.21.0.windows.1
In-Reply-To: <20211021095023.18288-1-jiawenwu@trustnetic.com>
References: <20211021095023.18288-1-jiawenwu@trustnetic.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v2 04/26] net/ngbe: support TSO

Add transmit datapath with offloads, and support TCP segmentation offload.
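For context, a minimal, illustrative sketch of how an application is expected to
drive this Tx path for TSO, once the port is configured with
DEV_TX_OFFLOAD_TCP_TSO (now advertised by ngbe_get_tx_port_offloads()). This is
not part of the patch; the helper name and the assumption of an IPv4/TCP packet
without TCP options are examples only.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_errno.h>

/* Send one already-built IPv4/TCP packet as TSO segments carrying at most
 * tso_segsz bytes of payload each. The l*_len fields and tso_segsz are what
 * ngbe_set_xmit_ctx() below packs into the Tx context descriptor.
 */
static inline int
xmit_one_tso(uint16_t port_id, uint16_t txq_id, struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);  /* no TCP options assumed */
	m->tso_segsz = 1460;                     /* MSS for a 1500-byte MTU */

	/* tx_pkt_prepare (ngbe_prep_pkts below) validates the offload request
	 * and fixes up the pseudo-header checksums for the hardware.
	 */
	if (rte_eth_tx_prepare(port_id, txq_id, &m, 1) != 1)
		return -rte_errno;

	/* returns 1 when the descriptor ring had room for the packet */
	return rte_eth_tx_burst(port_id, txq_id, &m, 1) == 1 ? 0 : -1;
}

Requests whose ol_flags fall outside NGBE_TX_OFFLOAD_MASK, or whose segment
count exceeds the hardware limit, are rejected by ngbe_prep_pkts() with
rte_errno set.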
Signed-off-by: Jiawen Wu --- doc/guides/nics/features/ngbe.ini | 3 + doc/guides/nics/ngbe.rst | 3 +- drivers/net/ngbe/ngbe_ethdev.c | 19 +- drivers/net/ngbe/ngbe_ethdev.h | 6 + drivers/net/ngbe/ngbe_rxtx.c | 679 ++++++++++++++++++++++++++++++ drivers/net/ngbe/ngbe_rxtx.h | 58 +++ 6 files changed, 766 insertions(+), 2 deletions(-) diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini index 80c71df0bc..d23e7f084f 100644 --- a/doc/guides/nics/features/ngbe.ini +++ b/doc/guides/nics/features/ngbe.ini @@ -9,8 +9,11 @@ Link status = Y Link status event = Y Queue start/stop = Y Scattered Rx = Y +TSO = Y L3 checksum offload = Y L4 checksum offload = Y +Inner L3 checksum = Y +Inner L4 checksum = Y Packet type parsing = Y Multiprocess aware = Y Linux = Y diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst index 0a14252ff2..6a6ae39243 100644 --- a/doc/guides/nics/ngbe.rst +++ b/doc/guides/nics/ngbe.rst @@ -13,8 +13,9 @@ Features - Packet type information - Checksum offload +- TSO offload - Link state information -- Scattered for RX +- Scattered and gather for TX and RX Prerequisites diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c index 03a07d6cad..ce3f254e33 100644 --- a/drivers/net/ngbe/ngbe_ethdev.c +++ b/drivers/net/ngbe/ngbe_ethdev.c @@ -138,7 +138,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) eth_dev->dev_ops = &ngbe_eth_dev_ops; eth_dev->rx_pkt_burst = &ngbe_recv_pkts; - eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple; + eth_dev->tx_pkt_burst = &ngbe_xmit_pkts; + eth_dev->tx_pkt_prepare = &ngbe_prep_pkts; /* * For secondary processes, we don't initialise any further as primary @@ -146,6 +147,20 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) * Rx and Tx function. */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + struct ngbe_tx_queue *txq; + /* Tx queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues; + txq = eth_dev->data->tx_queues[nb_tx_queues - 1]; + ngbe_set_tx_function(eth_dev, txq); + } else { + /* Use default Tx function if we get here */ + PMD_INIT_LOG(NOTICE, + "No Tx queues configured yet. 
Using default Tx function."); + } + ngbe_set_rx_function(eth_dev); return 0; @@ -641,6 +656,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_pktlen = 15872; dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) | dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = 0; + dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h index 80da1938c9..7b085f070f 100644 --- a/drivers/net/ngbe/ngbe_ethdev.h +++ b/drivers/net/ngbe/ngbe_ethdev.h @@ -114,9 +114,15 @@ uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue, uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c index d3fcd1c23d..d6a5b1d895 100644 --- a/drivers/net/ngbe/ngbe_rxtx.c +++ b/drivers/net/ngbe/ngbe_rxtx.c @@ -9,12 +9,26 @@ #include #include #include +#include #include "ngbe_logs.h" #include "base/ngbe.h" #include "ngbe_ethdev.h" #include "ngbe_rxtx.h" +/* Bit Mask to indicate what bits required for building Tx context */ +static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM | + PKT_TX_OUTER_IPV6 | + PKT_TX_OUTER_IPV4 | + PKT_TX_IPV6 | + PKT_TX_IPV4 | + PKT_TX_L4_MASK | + PKT_TX_TCP_SEG | + PKT_TX_TUNNEL_MASK | + PKT_TX_OUTER_IP_CKSUM); +#define NGBE_TX_OFFLOAD_NOTSUP_MASK \ + (PKT_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK) + /* * Prefetch a cache line into all cache levels. */ @@ -248,6 +262,614 @@ ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } +static inline void +ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq, + volatile struct ngbe_tx_ctx_desc *ctx_txd, + uint64_t ol_flags, union ngbe_tx_offload tx_offload) +{ + union ngbe_tx_offload tx_offload_mask; + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_idx; + uint32_t vlan_macip_lens; + uint32_t tunnel_seed; + + ctx_idx = txq->ctx_curr; + tx_offload_mask.data[0] = 0; + tx_offload_mask.data[1] = 0; + + /* Specify which HW CTX to upload. 
*/ + mss_l4len_idx = NGBE_TXD_IDX(ctx_idx); + type_tucmd_mlhl = NGBE_TXD_CTXT; + + tx_offload_mask.ptid |= ~0; + type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid); + + /* check if TCP segmentation required for this packet */ + if (ol_flags & PKT_TX_TCP_SEG) { + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + tx_offload_mask.l4_len |= ~0; + tx_offload_mask.tso_segsz |= ~0; + mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz); + mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len); + } else { /* no TSO, check if hardware checksum is needed */ + if (ol_flags & PKT_TX_IP_CKSUM) { + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + } + + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + mss_l4len_idx |= + NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr)); + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_TCP_CKSUM: + mss_l4len_idx |= + NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr)); + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + case PKT_TX_SCTP_CKSUM: + mss_l4len_idx |= + NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr)); + tx_offload_mask.l2_len |= ~0; + tx_offload_mask.l3_len |= ~0; + break; + default: + break; + } + } + + vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1); + + if (ol_flags & PKT_TX_TUNNEL_MASK) { + tx_offload_mask.outer_tun_len |= ~0; + tx_offload_mask.outer_l2_len |= ~0; + tx_offload_mask.outer_l3_len |= ~0; + tx_offload_mask.l2_len |= ~0; + tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1); + tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2); + + switch (ol_flags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_IPIP: + /* for non UDP / GRE tunneling, set to 0b */ + break; + default: + PMD_TX_LOG(ERR, "Tunnel type not supported"); + return; + } + vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len); + } else { + tunnel_seed = 0; + vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len); + } + + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].tx_offload.data[0] = + tx_offload_mask.data[0] & tx_offload.data[0]; + txq->ctx_cache[ctx_idx].tx_offload.data[1] = + tx_offload_mask.data[1] & tx_offload.data[1]; + txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask; + + ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed); + ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl); + ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx); +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. 
+ */ +static inline uint32_t +what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags, + union ngbe_tx_offload tx_offload) +{ + /* If match with the current used context */ + if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; + + /* What if match with the next context */ + txq->ctx_curr ^= 1; + if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; + + /* Mismatch, use the previous context */ + return NGBE_CTX_NUM; +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) +{ + uint32_t tmp = 0; + + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) { + tmp |= NGBE_TXD_CC; + tmp |= NGBE_TXD_L4CS; + } + if (ol_flags & PKT_TX_IP_CKSUM) { + tmp |= NGBE_TXD_CC; + tmp |= NGBE_TXD_IPCS; + } + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) { + tmp |= NGBE_TXD_CC; + tmp |= NGBE_TXD_EIPCS; + } + if (ol_flags & PKT_TX_TCP_SEG) { + tmp |= NGBE_TXD_CC; + /* implies IPv4 cksum */ + if (ol_flags & PKT_TX_IPV4) + tmp |= NGBE_TXD_IPCS; + tmp |= NGBE_TXD_L4CS; + } + + return tmp; +} + +static inline uint32_t +tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) +{ + uint32_t cmdtype = 0; + + if (ol_flags & PKT_TX_TCP_SEG) + cmdtype |= NGBE_TXD_TSE; + return cmdtype; +} + +static inline uint8_t +tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype) +{ + bool tun; + + if (ptype) + return ngbe_encode_ptype(ptype); + + /* Only support flags in NGBE_TX_OFFLOAD_MASK */ + tun = !!(oflags & PKT_TX_TUNNEL_MASK); + + /* L2 level */ + ptype = RTE_PTYPE_L2_ETHER; + + /* L3 level */ + if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM)) + ptype |= RTE_PTYPE_L3_IPV4; + else if (oflags & (PKT_TX_OUTER_IPV6)) + ptype |= RTE_PTYPE_L3_IPV6; + + if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM)) + ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4); + else if (oflags & (PKT_TX_IPV6)) + ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6); + + /* L4 level */ + switch (oflags & (PKT_TX_L4_MASK)) { + case PKT_TX_TCP_CKSUM: + ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); + break; + case PKT_TX_UDP_CKSUM: + ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP); + break; + case PKT_TX_SCTP_CKSUM: + ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP); + break; + } + + if (oflags & PKT_TX_TCP_SEG) + ptype |= (tun ? 
RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP); + + /* Tunnel */ + switch (oflags & PKT_TX_TUNNEL_MASK) { + case PKT_TX_TUNNEL_IPIP: + case PKT_TX_TUNNEL_IP: + ptype |= RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_TUNNEL_IP; + break; + } + + return ngbe_encode_ptype(ptype); +} + +/* Reset transmit descriptors after they have been used */ +static inline int +ngbe_xmit_cleanup(struct ngbe_tx_queue *txq) +{ + struct ngbe_tx_entry *sw_ring = txq->sw_ring; + volatile struct ngbe_tx_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + uint32_t status; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + /* Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + status = txr[desc_to_clean_to].dw3; + if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) { + PMD_TX_LOG(DEBUG, + "Tx descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); + if (txq->nb_tx_free >> 1 < txq->tx_free_thresh) + ngbe_set32_masked(txq->tdc_reg_addr, + NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + PMD_TX_LOG(DEBUG, + "Cleaning %4u Tx descriptors: %4u to %4u (port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. + */ + txr[desc_to_clean_to].dw3 = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); + + /* No Error */ + return 0; +} + +uint16_t +ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ngbe_tx_queue *txq; + struct ngbe_tx_entry *sw_ring; + struct ngbe_tx_entry *txe, *txn; + volatile struct ngbe_tx_desc *txr; + volatile struct ngbe_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint64_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint64_t tx_ol_req; + uint32_t ctx = 0; + uint32_t new_ctx; + union ngbe_tx_offload tx_offload; + + tx_offload.data[0] = 0; + tx_offload.data[1] = 0; + txq = tx_queue; + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Determine if the descriptor ring needs to be cleaned. 
*/ + if (txq->nb_tx_free < txq->tx_free_thresh) + ngbe_xmit_cleanup(txq); + + rte_prefetch0(&txe->mbuf->pool); + + /* Tx loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt_len; + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + + /* If hardware offload required */ + tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK; + if (tx_ol_req) { + tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req, + tx_pkt->packet_type); + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + tx_offload.outer_l2_len = tx_pkt->outer_l2_len; + tx_offload.outer_l3_len = tx_pkt->outer_l3_len; + tx_offload.outer_tun_len = 0; + + /* If new context need be built or reuse the exist ctx*/ + ctx = what_ctx_update(txq, tx_ol_req, tx_offload); + /* Only allocate context descriptor if required */ + new_ctx = (ctx == NGBE_CTX_NUM); + ctx = txq->ctx_curr; + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last Tx descriptor to allocate in the Tx ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u", + (uint16_t)txq->port_id, + (uint16_t)txq->queue_id, + (uint32_t)pkt_len, + (uint16_t)tx_id, + (uint16_t)tx_last); + + /* + * Make sure there are enough Tx descriptors available to + * transmit the entire packet. + * nb_used better be less than or equal to txq->tx_free_thresh + */ + if (nb_used > txq->nb_tx_free) { + PMD_TX_LOG(DEBUG, + "Not enough free Tx descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (ngbe_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + + /* nb_used better be <= txq->tx_free_thresh */ + if (unlikely(nb_used > txq->tx_free_thresh)) { + PMD_TX_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_free_thresh=%4u. " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_free_thresh, + txq->port_id, txq->queue_id); + /* + * Loop here until there are enough Tx + * descriptors or until the ring cannot be + * cleaned. + */ + while (nb_used > txq->nb_tx_free) { + if (ngbe_xmit_cleanup(txq) != 0) { + /* + * Could not clean any + * descriptors + */ + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* + * By now there are enough free Tx descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all Tx Data Descriptors. 
+ * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - NGBE_TXD_FCS + * + * The following bits must only be set in the last Data + * Descriptor: + * - NGBE_TXD_EOP + */ + cmd_type_len = NGBE_TXD_FCS; + + olinfo_status = 0; + if (tx_ol_req) { + if (ol_flags & PKT_TX_TCP_SEG) { + /* when TSO is on, paylen in descriptor is the + * not the packet len but the tcp payload len + */ + pkt_len -= (tx_offload.l2_len + + tx_offload.l3_len + tx_offload.l4_len); + pkt_len -= + (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) + ? tx_offload.outer_l2_len + + tx_offload.outer_l3_len : 0; + } + + /* + * Setup the Tx Context Descriptor if required + */ + if (new_ctx) { + volatile struct ngbe_tx_ctx_desc *ctx_txd; + + ctx_txd = (volatile struct ngbe_tx_ctx_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + tx_offload); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the Tx Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags); + olinfo_status |= + tx_desc_cksum_flags_to_olinfo(ol_flags); + olinfo_status |= NGBE_TXD_IDX(ctx); + } + + olinfo_status |= NGBE_TXD_PAYLEN(pkt_len); + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + rte_prefetch0(&txn->mbuf->pool); + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up Transmit Data Descriptor. + */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->qw0 = rte_cpu_to_le_64(buf_dma_addr); + txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen); + txd->dw3 = rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= NGBE_TXD_EOP; + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); + + txd->dw2 |= rte_cpu_to_le_32(cmd_type_len); + } + +end_of_tx: + + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (uint16_t)txq->port_id, (uint16_t)txq->queue_id, + (uint16_t)tx_id, (uint16_t)nb_tx); + ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/********************************************************************* + * + * Tx prep functions + * + **********************************************************************/ +uint16_t +ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /** + * Check if packet meets requirements for number of segments + * + * NOTE: for ngbe it's always (40 - WTHRESH) for both TSO and + * non-TSO + */ + + if (m->nb_segs > NGBE_TX_MAX_SEG - txq->wthresh) { + rte_errno = -EINVAL; + return i; + } + + if (ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = -ENOTSUP; + return i; + } + +#ifdef RTE_ETHDEV_DEBUG_TX + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + 
rte_errno = ret; + return i; + } + } + + return i; +} + /********************************************************************* * * Rx functions @@ -1040,6 +1662,56 @@ static const struct ngbe_txq_ops def_txq_ops = { .reset = ngbe_reset_tx_queue, }; +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void +ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq) +{ + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (txq->offloads == 0 && + txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) { + PMD_INIT_LOG(DEBUG, "Using simple tx code path"); + dev->tx_pkt_burst = ngbe_xmit_pkts_simple; + dev->tx_pkt_prepare = NULL; + } else { + PMD_INIT_LOG(DEBUG, "Using full-featured tx code path"); + PMD_INIT_LOG(DEBUG, + " - offloads = 0x%" PRIx64, + txq->offloads); + PMD_INIT_LOG(DEBUG, + " - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]", + (unsigned long)txq->tx_free_thresh, + (unsigned long)RTE_PMD_NGBE_TX_MAX_BURST); + dev->tx_pkt_burst = ngbe_xmit_pkts; + dev->tx_pkt_prepare = ngbe_prep_pkts; + } +} + +uint64_t +ngbe_get_tx_port_offloads(struct rte_eth_dev *dev) +{ + uint64_t tx_offload_capa; + + RTE_SET_USED(dev); + + tx_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_UDP_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_MULTI_SEGS; + + return tx_offload_capa; +} + int ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1051,10 +1723,13 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq; struct ngbe_hw *hw; uint16_t tx_free_thresh; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); hw = ngbe_dev_hw(dev); + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; + /* * The Tx descriptor ring will be cleaned after txq->tx_free_thresh * descriptors are used or if the number of descriptors required @@ -1116,6 +1791,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->queue_id = queue_idx; txq->reg_idx = queue_idx; txq->port_id = dev->data->port_id; + txq->offloads = offloads; txq->ops = &def_txq_ops; txq->tx_deferred_start = tx_conf->tx_deferred_start; @@ -1137,6 +1813,9 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64, txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + /* set up scalar Tx function as appropriate */ + ngbe_set_tx_function(dev, txq); + txq->ops->reset(txq); dev->data->tx_queues[queue_idx] = txq; diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h index e59e6c461d..eb86a79760 100644 --- a/drivers/net/ngbe/ngbe_rxtx.h +++ b/drivers/net/ngbe/ngbe_rxtx.h @@ -135,8 +135,35 @@ struct ngbe_tx_ctx_desc { rte_le32_t dw3; /* w.mss_l4len_idx */ }; +/* @ngbe_tx_ctx_desc.dw0 */ +#define NGBE_TXD_IPLEN(v) LS(v, 0, 0x1FF) /* ip/fcoe header end */ +#define NGBE_TXD_MACLEN(v) LS(v, 9, 0x7F) /* desc mac len */ +#define NGBE_TXD_VLAN(v) LS(v, 16, 0xFFFF) /* vlan tag */ + +/* @ngbe_tx_ctx_desc.dw1 */ +/*** bit 0-31, when NGBE_TXD_DTYP_FCOE=0 ***/ +#define NGBE_TXD_IPSEC_SAIDX(v) LS(v, 0, 0x3FF) /* ipsec SA index */ +#define NGBE_TXD_ETYPE(v) LS(v, 11, 0x1) /* tunnel type */ +#define NGBE_TXD_ETYPE_UDP LS(0, 11, 0x1) 
+#define NGBE_TXD_ETYPE_GRE LS(1, 11, 0x1) +#define NGBE_TXD_EIPLEN(v) LS(v, 12, 0x7F) /* tunnel ip header */ +#define NGBE_TXD_DTYP_FCOE MS(16, 0x1) /* FCoE/IP descriptor */ +#define NGBE_TXD_ETUNLEN(v) LS(v, 21, 0xFF) /* tunnel header */ +#define NGBE_TXD_DECTTL(v) LS(v, 29, 0xF) /* decrease ip TTL */ + +/* @ngbe_tx_ctx_desc.dw2 */ +#define NGBE_TXD_IPSEC_ESPLEN(v) LS(v, 1, 0x1FF) /* ipsec ESP length */ +#define NGBE_TXD_SNAP MS(10, 0x1) /* SNAP indication */ +#define NGBE_TXD_TPID_SEL(v) LS(v, 11, 0x7) /* vlan tag index */ +#define NGBE_TXD_IPSEC_ESP MS(14, 0x1) /* ipsec type: esp=1 ah=0 */ +#define NGBE_TXD_IPSEC_ESPENC MS(15, 0x1) /* ESP encrypt */ +#define NGBE_TXD_CTXT MS(20, 0x1) /* context descriptor */ +#define NGBE_TXD_PTID(v) LS(v, 24, 0xFF) /* packet type */ /* @ngbe_tx_ctx_desc.dw3 */ #define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */ +#define NGBE_TXD_IDX(v) LS(v, 4, 0x1) /* ctxt desc index */ +#define NGBE_TXD_L4LEN(v) LS(v, 8, 0xFF) /* l4 header length */ +#define NGBE_TXD_MSS(v) LS(v, 16, 0xFFFF) /* l4 MSS */ /** * Transmit Data Descriptor (NGBE_TXD_TYP=DATA) @@ -256,11 +283,34 @@ enum ngbe_ctx_num { NGBE_CTX_NUM = 2, /**< CTX NUMBER */ }; +/** Offload features */ +union ngbe_tx_offload { + uint64_t data[2]; + struct { + uint64_t ptid:8; /**< Packet Type Identifier. */ + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint64_t l3_len:9; /**< L3 (IP) Header Length. */ + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */ + uint64_t tso_segsz:16; /**< TCP TSO segment size */ + uint64_t vlan_tci:16; + /**< VLAN Tag Control Identifier (CPU order). */ + + /* fields for TX offloading of tunnels */ + uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */ + uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */ + uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */ + }; +}; + /** * Structure to check if new context need be built */ struct ngbe_ctx_info { uint64_t flags; /**< ol_flags for context build. */ + /**< tx offload: vlan, tso, l2-l3-l4 lengths. */ + union ngbe_tx_offload tx_offload; + /** compare mask for tx offload. */ + union ngbe_tx_offload tx_offload_mask; }; /** @@ -292,6 +342,7 @@ struct ngbe_tx_queue { uint8_t pthresh; /**< Prefetch threshold register */ uint8_t hthresh; /**< Host threshold register */ uint8_t wthresh; /**< Write-back threshold reg */ + uint64_t offloads; /**< Tx offload flags */ uint32_t ctx_curr; /**< Hardware context states */ /** Hardware context0 history */ struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM]; @@ -306,8 +357,15 @@ struct ngbe_txq_ops { void (*reset)(struct ngbe_tx_queue *txq); }; +/* Takes an ethdev and a queue and sets up the tx function to be used based on + * the queue parameters. Used in tx_queue_setup by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq); + void ngbe_set_rx_function(struct rte_eth_dev *dev); +uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev); uint64_t ngbe_get_rx_port_offloads(struct rte_eth_dev *dev); #endif /* _NGBE_RXTX_H_ */ -- 2.21.0.windows.1