From mboxrd@z Thu Jan 1 00:00:00 1970
From: beilei.xing@intel.com
To: jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Xiaoyun Li
Subject: [PATCH v18 16/18] net/idpf: add support for Tx offloading
Date: Mon, 31 Oct 2022 08:33:44 +0000
Message-Id: <20221031083346.16558-17-beilei.xing@intel.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20221031083346.16558-1-beilei.xing@intel.com>
References: <20221031051556.98549-1-beilei.xing@intel.com>
 <20221031083346.16558-1-beilei.xing@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Junfeng Guo

Add Tx offloading support:
 - support TSO for single queue model and split queue model.
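
A minimal usage sketch, not part of the patch itself: how an application
might request the new TSO offload through the generic ethdev/mbuf API. The
helper names (configure_port_with_tso, send_tso_pkt), the queue counts, the
header lengths and the MSS value are illustrative assumptions, not taken
from this series.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: enable TSO when configuring the port.
     * The single Rx/Tx queue counts are placeholders.
     */
    static int
    configure_port_with_tso(uint16_t port_id)
    {
            struct rte_eth_conf conf = { 0 };

            conf.txmode.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                   RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }

    /* Hypothetical helper: mark one TCP packet for TSO and transmit it.
     * The PMD uses l2/l3/l4_len and tso_segsz to build the TSO context
     * descriptor (see idpf_set_splitq_tso_ctx below).
     */
    static uint16_t
    send_tso_pkt(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
    {
            m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
            m->l2_len = 14;       /* Ethernet header, example value */
            m->l3_len = 20;       /* IPv4 header without options */
            m->l4_len = 20;       /* TCP header without options */
            m->tso_segsz = 1460;  /* must stay within [IDPF_MIN_TSO_MSS, IDPF_MAX_TSO_MSS] */
            return rte_eth_tx_burst(port_id, queue_id, &m, 1);
    }

With only RTE_MBUF_F_TX_TCP_SEG set, such a packet passes the
IDPF_TX_OFFLOAD_NOTSUP_MASK check added to idpf_prep_pkts() below.
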
Signed-off-by: Beilei Xing
Signed-off-by: Xiaoyun Li
Signed-off-by: Junfeng Guo
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |   4 +-
 drivers/net/idpf/idpf_rxtx.c      | 128 +++++++++++++++++++++++++++++-
 drivers/net/idpf/idpf_rxtx.h      |  22 +++++
 4 files changed, 152 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 868571654f..d82b4aa0ff 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -8,6 +8,7 @@
 ;
 [Features]
 MTU update           = Y
+TSO                  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Linux                = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index a09f104425..084426260c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -67,7 +67,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
         RTE_ETH_RX_OFFLOAD_TCP_CKSUM        |
         RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-    dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+    dev_info->tx_offload_capa =
+        RTE_ETH_TX_OFFLOAD_TCP_TSO          |
+        RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
     dev_info->default_txconf = (struct rte_eth_txconf) {
         .tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index f15e61a785..cc296d7ab1 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1506,6 +1506,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
     cq->tx_tail = next;
 }
 
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+    if ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+        return 1;
+
+    return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+            union idpf_tx_offload tx_offload,
+            volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+    uint16_t cmd_dtype;
+    uint32_t tso_len;
+    uint8_t hdr_len;
+
+    if (tx_offload.l4_len == 0) {
+        PMD_TX_LOG(DEBUG, "L4 length set to 0");
+        return;
+    }
+
+    hdr_len = tx_offload.l2_len +
+        tx_offload.l3_len +
+        tx_offload.l4_len;
+    cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+        IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
+    tso_len = mbuf->pkt_len - hdr_len;
+
+    ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+    ctx_desc->tso.qw0.hdr_len = hdr_len;
+    ctx_desc->tso.qw0.mss_rt =
+        rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+                 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+    ctx_desc->tso.qw0.flex_tlen =
+        rte_cpu_to_le_32(tso_len &
+                 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+}
+
 uint16_t
 idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
               uint16_t nb_pkts)
@@ -1514,11 +1557,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
     volatile struct idpf_flex_tx_sched_desc *txr;
     volatile struct idpf_flex_tx_sched_desc *txd;
     struct idpf_tx_entry *sw_ring;
+    union idpf_tx_offload tx_offload = {0};
     struct idpf_tx_entry *txe, *txn;
     uint16_t nb_used, tx_id, sw_id;
     struct rte_mbuf *tx_pkt;
     uint16_t nb_to_clean;
     uint16_t nb_tx = 0;
+    uint64_t ol_flags;
+    uint16_t nb_ctx;
 
     if (unlikely(txq == NULL) || unlikely(!txq->q_started))
         return nb_tx;
@@ -1548,7 +1594,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         if (txq->nb_free < tx_pkt->nb_segs)
             break;
 
-        nb_used = tx_pkt->nb_segs;
+
+        ol_flags = tx_pkt->ol_flags;
+        tx_offload.l2_len = tx_pkt->l2_len;
+        tx_offload.l3_len = tx_pkt->l3_len;
+        tx_offload.l4_len = tx_pkt->l4_len;
+        tx_offload.tso_segsz = tx_pkt->tso_segsz;
+        /* Calculate the number of context descriptors needed. */
+        nb_ctx = idpf_calc_context_desc(ol_flags);
+        nb_used = tx_pkt->nb_segs + nb_ctx;
+
+        /* context descriptor */
+        if (nb_ctx != 0) {
+            volatile union idpf_flex_tx_ctx_desc *ctx_desc =
+            (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+            if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+                idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                            ctx_desc);
+
+            tx_id++;
+            if (tx_id == txq->nb_tx_desc)
+                tx_id = 0;
+        }
 
         do {
             txd = &txr[tx_id];
@@ -1799,14 +1867,17 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 {
     volatile struct idpf_flex_tx_desc *txd;
     volatile struct idpf_flex_tx_desc *txr;
+    union idpf_tx_offload tx_offload = {0};
     struct idpf_tx_entry *txe, *txn;
     struct idpf_tx_entry *sw_ring;
     struct idpf_tx_queue *txq;
     struct rte_mbuf *tx_pkt;
     struct rte_mbuf *m_seg;
     uint64_t buf_dma_addr;
+    uint64_t ol_flags;
     uint16_t tx_last;
     uint16_t nb_used;
+    uint16_t nb_ctx;
     uint16_t td_cmd;
     uint16_t tx_id;
     uint16_t nb_tx;
@@ -1833,11 +1904,19 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         tx_pkt = *tx_pkts++;
         RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+        ol_flags = tx_pkt->ol_flags;
+        tx_offload.l2_len = tx_pkt->l2_len;
+        tx_offload.l3_len = tx_pkt->l3_len;
+        tx_offload.l4_len = tx_pkt->l4_len;
+        tx_offload.tso_segsz = tx_pkt->tso_segsz;
+        /* Calculate the number of context descriptors needed. */
+        nb_ctx = idpf_calc_context_desc(ol_flags);
+
         /* The number of descriptors that must be allocated for
          * a packet equals to the number of the segments of that
          * packet plus 1 context descriptor if needed.
          */
-        nb_used = (uint16_t)(tx_pkt->nb_segs);
+        nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
         tx_last = (uint16_t)(tx_id + nb_used - 1);
 
         /* Circular ring */
@@ -1865,6 +1944,29 @@ idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             }
         }
 
+        if (nb_ctx != 0) {
+            /* Setup TX context descriptor if required */
+            volatile union idpf_flex_tx_ctx_desc *ctx_txd =
+                (volatile union idpf_flex_tx_ctx_desc *)
+                            &txr[tx_id];
+
+            txn = &sw_ring[txe->next_id];
+            RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+            if (txe->mbuf != NULL) {
+                rte_pktmbuf_free_seg(txe->mbuf);
+                txe->mbuf = NULL;
+            }
+
+            /* TSO enabled */
+            if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+                idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+                            ctx_txd);
+
+            txe->last_id = tx_last;
+            tx_id = txe->next_id;
+            txe = txn;
+        }
+
         m_seg = tx_pkt;
         do {
             txd = &txr[tx_id];
@@ -1924,6 +2026,9 @@ uint16_t
 idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
            uint16_t nb_pkts)
 {
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+    int ret;
+#endif
     int i;
     uint64_t ol_flags;
     struct rte_mbuf *m;
@@ -1938,12 +2043,31 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                 rte_errno = EINVAL;
                 return i;
             }
+        } else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) ||
+               (m->tso_segsz > IDPF_MAX_TSO_MSS) ||
+               (m->pkt_len > IDPF_MAX_TSO_FRAME_SIZE)) {
+            /* MSS outside the range are considered malicious */
+            rte_errno = EINVAL;
+            return i;
+        }
+
+        if ((ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) != 0) {
+            rte_errno = ENOTSUP;
+            return i;
         }
 
         if (m->pkt_len < IDPF_MIN_FRAME_SIZE) {
             rte_errno = EINVAL;
             return i;
         }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+        ret = rte_validate_tx_offload(m);
+        if (ret != 0) {
+            rte_errno = -ret;
+            return i;
+        }
+#endif
     }
 
     return i;
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 3853ed55c9..54d297aac6 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -23,6 +23,16 @@
 #define IDPF_TX_MAX_MTU_SEG 10
 
+#define IDPF_MIN_TSO_MSS 88
+#define IDPF_MAX_TSO_MSS 9728
+#define IDPF_MAX_TSO_FRAME_SIZE 262143
+#define IDPF_TX_MAX_MTU_SEG 10
+
+#define IDPF_TX_OFFLOAD_MASK RTE_MBUF_F_TX_TCP_SEG
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+        (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
 #define IDPF_GET_PTYPE_SIZE(p) \
     (sizeof(struct virtchnl2_ptype) + \
     (((p)->proto_id_count ? ((p)->proto_id_count - 1) : 0) * sizeof((p)->proto_id[0])))
 
@@ -115,6 +125,18 @@ struct idpf_tx_queue {
     struct idpf_tx_queue *complq;
 };
 
+/* Offload features */
+union idpf_tx_offload {
+    uint64_t data;
+    struct {
+        uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+        uint64_t l3_len:9; /* L3 (IP) Header Length. */
+        uint64_t l4_len:8; /* L4 Header Length. */
+        uint64_t tso_segsz:16; /* TCP TSO segment size */
+        /* uint64_t unused : 24; */
+    };
+};
+
 struct idpf_rxq_ops {
     void (*release_mbufs)(struct idpf_rx_queue *rxq);
 };
-- 
2.26.2