From: Junfeng Guo
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com, Xiaoyun Li
Subject: [PATCH v3 10/15] net/idpf: add support for Rx/Tx offloading
Date: Tue, 18 Oct 2022 19:12:40 +0800
Message-Id: <20221018111245.890651-11-junfeng.guo@intel.com>
In-Reply-To: <20221018111245.890651-1-junfeng.guo@intel.com>
References: <20220905105828.3190335-1-junfeng.guo@intel.com>
 <20221018111245.890651-1-junfeng.guo@intel.com>

Add Rx/Tx offloading support, including TSO and checksum (CHKSUM) offload,
for both the single queue and split queue models.
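As a usage sketch only (not part of this patch): an application would request
these offloads at configure time and, for TSO, mark each mbuf accordingly. The
port id, queue counts and the example_configure() helper below are placeholders;
the offload flags mirror the capabilities reported by idpf_dev_info_get() in
this patch.

  #include <stdlib.h>
  #include <rte_ethdev.h>
  #include <rte_debug.h>

  static void
  example_configure(uint16_t port_id)
  {
          struct rte_eth_conf conf = { 0 };

          /* Rx checksum offloads advertised in rx_offload_capa */
          conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
          /* Tx TSO and multi-segment offloads advertised in tx_offload_capa */
          conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

          /* one Rx queue and one Tx queue are placeholder values */
          if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
                  rte_exit(EXIT_FAILURE, "port configure failed\n");
  }

For TSO the application additionally sets RTE_MBUF_F_TX_TCP_SEG and fills
l2_len/l3_len/l4_len and tso_segsz in each mbuf, which the new Tx path turns
into a TSO context descriptor; the Rx path reports checksum results through
the RTE_MBUF_F_RX_*_CKSUM_GOOD/BAD flags in mbuf->ol_flags.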
Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/features/idpf.ini |   3 +
 drivers/net/idpf/idpf_ethdev.c    |  10 ++
 drivers/net/idpf/idpf_rxtx.c      | 237 +++++++++++++++++++++++++++++-
 drivers/net/idpf/idpf_rxtx.h      |  19 +++
 4 files changed, 267 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index f1849cd821..23953c384d 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -10,6 +10,9 @@ Queue start/stop     = Y
 Runtime Rx queue setup = Y
 Runtime Tx queue setup = Y
+TSO                  = P
+L3 checksum offload  = P
+L4 checksum offload  = P
 Packet type parsing  = Y
 Multiprocess aware   = Y
 FreeBSD              = Y

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index c6a2c85f17..d09de4075c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -86,6 +86,16 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
         dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                              RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+        dev_info->rx_offload_capa =
+                RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+                RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+                RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+                RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+        dev_info->tx_offload_capa =
+                RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+                RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
         dev_info->default_rxconf = (struct rte_eth_rxconf) {
                 .rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,

diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index b2c2dca0a8..ff3b710f4d 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1269,6 +1269,47 @@ idpf_stop_queues(struct rte_eth_dev *dev)
         }
 }
 
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S \
+        (BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+        uint64_t flags = 0;
+
+        if (unlikely(!(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S))))
+                return flags;
+
+        if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+                flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+                          RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+                return flags;
+        }
+
+        if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)))
+                flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+        if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)))
+                flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+        if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)))
+                flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+        if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)))
+                flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+        return flags;
+}
+
 static void
 idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
 {
@@ -1342,9 +1383,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         uint16_t pktlen_gen_bufq_id;
         struct idpf_rx_queue *rxq;
         const uint32_t *ptype_tbl;
+        uint8_t status_err0_qw1;
         struct rte_mbuf *rxm;
         uint16_t rx_id_bufq1;
         uint16_t rx_id_bufq2;
+        uint64_t pkt_flags;
         uint16_t pkt_len;
         uint16_t bufq_id;
         uint16_t gen_id;
@@ -1419,6 +1462,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                            VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
                           VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
 
+                status_err0_qw1 = rx_desc->status_err0_qw1;
+                pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+
+                rxm->ol_flags |= pkt_flags;
+
                 rx_pkts[nb_rx++] = rxm;
         }
 
@@ -1495,6 +1543,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
         cq->tx_tail = next;
 }
 
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+        if (flags & RTE_MBUF_F_TX_TCP_SEG)
+                return 1;
+
+        return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+                        union idpf_tx_offload tx_offload,
+                        volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+        uint16_t cmd_dtype;
+        uint32_t tso_len;
+        uint8_t hdr_len;
+
+        if (!tx_offload.l4_len) {
+                PMD_TX_LOG(DEBUG, "L4 length set to 0");
+                return;
+        }
+
+        hdr_len = tx_offload.l2_len +
+                tx_offload.l3_len +
+                tx_offload.l4_len;
+        cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+                IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
+        tso_len = mbuf->pkt_len - hdr_len;
+
+        ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+        ctx_desc->tso.qw0.hdr_len = hdr_len;
+        ctx_desc->tso.qw0.mss_rt =
+                rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+                                 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+        ctx_desc->tso.qw0.flex_tlen =
+                rte_cpu_to_le_32(tso_len &
+                                 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+}
+
 uint16_t
 idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
@@ -1503,11 +1594,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         volatile struct idpf_flex_tx_sched_desc *txr;
         volatile struct idpf_flex_tx_sched_desc *txd;
         struct idpf_tx_entry *sw_ring;
+        union idpf_tx_offload tx_offload = {0};
         struct idpf_tx_entry *txe, *txn;
         uint16_t nb_used, tx_id, sw_id;
         struct rte_mbuf *tx_pkt;
         uint16_t nb_to_clean;
         uint16_t nb_tx = 0;
+        uint64_t ol_flags;
+        uint16_t nb_ctx;
 
         if (unlikely(!txq) || unlikely(!txq->q_started))
                 return nb_tx;
@@ -1537,8 +1631,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 if (txq->nb_free < tx_pkt->nb_segs)
                         break;
 
-                nb_used = tx_pkt->nb_segs;
+                ol_flags = tx_pkt->ol_flags;
+                tx_offload.l2_len = tx_pkt->l2_len;
+                tx_offload.l3_len = tx_pkt->l3_len;
+                tx_offload.l4_len = tx_pkt->l4_len;
+                tx_offload.tso_segsz = tx_pkt->tso_segsz;
+                /* Calculate the number of context descriptors needed. */
+                nb_ctx = idpf_calc_context_desc(ol_flags);
+                nb_used = tx_pkt->nb_segs + nb_ctx;
+
+                /* context descriptor */
+                if (nb_ctx) {
+                        volatile union idpf_flex_tx_ctx_desc *ctx_desc =
+                                (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+                        if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+                                idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                                                        ctx_desc);
+
+                        tx_id++;
+                        if (tx_id == txq->nb_tx_desc)
+                                tx_id = 0;
+                }
 
                 do {
                         txd = &txr[tx_id];
@@ -1565,6 +1680,8 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 if (unlikely(!(tx_id % 32)))
                         txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
 
+                if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK)
+                        txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
                 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
                 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
         }
@@ -1579,6 +1696,48 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         return nb_tx;
 }
 
+#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S \
+        (BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+         BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))
+
+/* Translate the rx descriptor status and error fields to pkt flags */
+static inline uint64_t
+idpf_rxd_to_pkt_flags(uint16_t status_error)
+{
+        uint64_t flags = 0;
+
+        if (unlikely(!(status_error & BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S))))
+                return flags;
+
+        if (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {
+                flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+                          RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+                return flags;
+        }
+
+        if (unlikely(status_error & BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
+                flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+        if (unlikely(status_error & BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
+                flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+        if (unlikely(status_error & BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
+                flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+        if (unlikely(status_error & BIT(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+                flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+        else
+                flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+        return flags;
+}
+
 static inline void
 idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
                     uint16_t rx_id)
@@ -1612,6 +1771,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         struct rte_mbuf *rxm;
         struct rte_mbuf *nmb;
         uint16_t rx_status0;
+        uint64_t pkt_flags;
         uint64_t dma_addr;
         uint16_t nb_rx;
 
@@ -1681,10 +1841,13 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 rxm->data_len = rx_packet_len;
                 rxm->port = rxq->port_id;
                 rxm->ol_flags = 0;
+                pkt_flags = idpf_rxd_to_pkt_flags(rx_status0);
                 rxm->packet_type =
                         ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &
                                             VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];
 
+                rxm->ol_flags |= pkt_flags;
+
                 rx_pkts[nb_rx++] = rxm;
         }
 
@@ -1740,6 +1903,31 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
         return 0;
 }
 
+/* set TSO context descriptor
+ * support IP -> L4 and IP -> IP -> L4
+ */
+static inline uint64_t
+idpf_set_tso_ctx(struct rte_mbuf *mbuf, union idpf_tx_offload tx_offload)
+{
+        uint64_t ctx_desc = 0;
+        uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+        if (!tx_offload.l4_len) {
+                PMD_TX_LOG(DEBUG, "L4 length set to 0");
+                return ctx_desc;
+        }
+
+        hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+
+        cd_cmd = IDPF_TX_CTX_DESC_TSO;
+        cd_tso_len = mbuf->pkt_len - hdr_len;
+        ctx_desc |= ((uint64_t)cd_cmd << IDPF_TXD_CTX_QW1_CMD_S) |
+                ((uint64_t)cd_tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) |
+                ((uint64_t)mbuf->tso_segsz << IDPF_TXD_CTX_QW1_MSS_S);
+
+        return ctx_desc;
+}
+
 /* TX function */
 uint16_t
 idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -1747,14 +1935,17 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
         volatile struct idpf_flex_tx_desc *txd;
         volatile struct idpf_flex_tx_desc *txr;
+        union idpf_tx_offload tx_offload = {0};
         struct idpf_tx_entry *txe, *txn;
         struct idpf_tx_entry *sw_ring;
         struct idpf_tx_queue *txq;
         struct rte_mbuf *tx_pkt;
         struct rte_mbuf *m_seg;
         uint64_t buf_dma_addr;
+        uint64_t ol_flags;
         uint16_t tx_last;
         uint16_t nb_used;
+        uint16_t nb_ctx;
         uint16_t td_cmd;
         uint16_t tx_id;
         uint16_t nb_tx;
@@ -1781,11 +1972,19 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 tx_pkt = *tx_pkts++;
                 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+                ol_flags = tx_pkt->ol_flags;
+                tx_offload.l2_len = tx_pkt->l2_len;
+                tx_offload.l3_len = tx_pkt->l3_len;
+                tx_offload.l4_len = tx_pkt->l4_len;
+                tx_offload.tso_segsz = tx_pkt->tso_segsz;
+                /* Calculate the number of context descriptors needed. */
+                nb_ctx = idpf_calc_context_desc(ol_flags);
+
                 /* The number of descriptors that must be allocated for
                  * a packet equals to the number of the segments of that
                  * packet plus 1 context descriptor if needed.
                  */
-                nb_used = (uint16_t)(tx_pkt->nb_segs);
+                nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
                 tx_last = (uint16_t)(tx_id + nb_used - 1);
 
                 /* Circular ring */
@@ -1813,6 +2012,28 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         }
                 }
 
+                if (nb_ctx) {
+                        /* Setup TX context descriptor if required */
+                        volatile union idpf_flex_tx_ctx_desc *ctx_txd =
+                                (volatile union idpf_flex_tx_ctx_desc *)
+                                &txr[tx_id];
+
+                        txn = &sw_ring[txe->next_id];
+                        RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+                        if (txe->mbuf) {
+                                rte_pktmbuf_free_seg(txe->mbuf);
+                                txe->mbuf = NULL;
+                        }
+
+                        /* TSO enabled */
+                        if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+                                idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                                                        ctx_txd);
+
+                        txe->last_id = tx_last;
+                        tx_id = txe->next_id;
+                        txe = txn;
+                }
+
                 m_seg = tx_pkt;
                 do {
@@ -1895,11 +2116,23 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                         return i;
                 }
 
+                if (ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) {
+                        rte_errno = ENOTSUP;
+                        return i;
+                }
+
                 if (m->pkt_len < IDPF_MIN_FRAME_SIZE) {
                         rte_errno = EINVAL;
                         return i;
                 }
 
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+                ret = rte_validate_tx_offload(m);
+                if (ret != 0) {
+                        rte_errno = -ret;
+                        return i;
+                }
+#endif
                 ret = rte_net_intel_cksum_prepare(m);
                 if (ret != 0) {
                         rte_errno = -ret;

diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index bd3ebe2f50..a2394ffa2c 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -44,6 +44,25 @@
 #define IDPF_MAX_TSO_FRAME_SIZE 262143
 #define IDPF_TX_MAX_MTU_SEG     10
 
+#define IDPF_TX_CKSUM_OFFLOAD_MASK ( \
+                RTE_MBUF_F_TX_IP_CKSUM | \
+                RTE_MBUF_F_TX_L4_MASK | \
+                RTE_MBUF_F_TX_TCP_SEG)
+
+#define IDPF_TX_OFFLOAD_MASK ( \
+                RTE_MBUF_F_TX_OUTER_IPV6 | \
+                RTE_MBUF_F_TX_OUTER_IPV4 | \
+                RTE_MBUF_F_TX_IPV6 | \
+                RTE_MBUF_F_TX_IPV4 | \
+                RTE_MBUF_F_TX_VLAN | \
+                RTE_MBUF_F_TX_IP_CKSUM | \
+                RTE_MBUF_F_TX_L4_MASK | \
+                RTE_MBUF_F_TX_TCP_SEG | \
+                RTE_ETH_TX_OFFLOAD_SECURITY)
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+                (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
 #define IDPF_GET_PTYPE_SIZE(p) \
         (sizeof(struct virtchnl2_ptype) + \
          (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
-- 
2.34.1