From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Xiaoyun Li
Subject: [PATCH v12 15/18] net/idpf: add support for Rx offloading
Date: Wed, 26 Oct 2022 18:10:24 +0800
Message-Id: <20221026101027.240583-16-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221026101027.240583-1-junfeng.guo@intel.com>
References: <20221024131227.1062446-2-junfeng.guo@intel.com>
 <20221026101027.240583-1-junfeng.guo@intel.com>

Add Rx offloading support:
 - support CHKSUM and RSS offload for the split queue model
 - support CHKSUM offload for the single queue model

Signed-off-by: Beilei Xing
Signed-off-by: Xiaoyun Li
Signed-off-by: Junfeng Guo
---
 doc/guides/nics/features/idpf.ini |   5 ++
 drivers/net/idpf/idpf_ethdev.c    |   6 ++
 drivers/net/idpf/idpf_rxtx.c      | 122 ++++++++++++++++++++++++++++++
 3 files changed, 133 insertions(+)
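
Usage note (illustrative, not part of this patch): an application opts in to
these Rx offloads at configure time, and the PMD then reports per-packet
results in each mbuf's ol_flags. A minimal sketch, assuming a single queue
pair, 1024 descriptors, and an existing mbuf mempool; the function name and
sizes here are hypothetical:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Request the Rx checksum offloads this patch advertises; assumes the
 * device reports them in dev_info->rx_offload_capa. */
static int
setup_rx_csum(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf = {0};
	int ret;

	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			       RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			       RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
			       RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* One Rx queue, 1024 descriptors, default rxconf (NULL). */
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
				     rte_eth_dev_socket_id(port_id), NULL, mp);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}

rte_eth_dev_info_get() can be called first to confirm the device actually
advertises these bits in rx_offload_capa, as added below.
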
diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index c8f4a0d6ed..6eefac9529 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -3,9 +3,14 @@
 ;
 ; Refer to default.ini for the full list of available PMD features.
 ;
+; A feature marked with "P" is only supported when the non-vector
+; path is selected.
+;
 [Features]
 Queue start/stop     = Y
 MTU update           = Y
+L3 checksum offload  = P
+L4 checksum offload  = P
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index ea7d3bfd7e..88f3db3267 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -89,6 +89,12 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
 
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index f9f751a6ad..cdd20702d8 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1209,6 +1209,72 @@ idpf_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S \
+	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+	uint64_t flags = 0;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+		return flags;
+
+	if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+	uint8_t status_err0_qw0;
+	uint64_t flags = 0;
+
+	status_err0_qw0 = rx_desc->status_err0_qw0;
+
+	if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+		flags |= RTE_MBUF_F_RX_RSS_HASH;
+		mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+				IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+			((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+			 IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+			((uint32_t)(rx_desc->hash3) <<
+			 IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+	}
+
+	return flags;
+}
 
 static void
 idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
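
Note for readers (illustrative, not part of the patch): idpf_splitq_rx_rss_offload()
above reassembles the 32-bit RSS hash from three descriptor fields, per the
HASH1/2/3 shifts: hash1 fills bits 15:0, hash2 bits 23:16, hash3 bits 31:24.
The same bit layout as a standalone helper (the function name is hypothetical):

#include <stdint.h>
#include <rte_byteorder.h>

/* Combine the split hash fields exactly as the shifts above do;
 * hash1 is the 16-bit little-endian field, hash2/hash3 are single bytes. */
static inline uint32_t
rss_hash_from_fields(rte_le16_t hash1, uint8_t hash2, uint8_t hash3)
{
	return (uint32_t)rte_le_to_cpu_16(hash1) |	/* bits 15:0  */
	       ((uint32_t)hash2 << 16) |		/* bits 23:16 */
	       ((uint32_t)hash3 << 24);			/* bits 31:24 */
}
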
@@ -1283,9 +1349,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t pktlen_gen_bufq_id;
 	struct idpf_rx_queue *rxq;
 	const uint32_t *ptype_tbl;
+	uint8_t status_err0_qw1;
 	struct rte_mbuf *rxm;
 	uint16_t rx_id_bufq1;
 	uint16_t rx_id_bufq2;
+	uint64_t pkt_flags;
 	uint16_t pkt_len;
 	uint16_t bufq_id;
 	uint16_t gen_id;
@@ -1350,11 +1418,18 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->next = NULL;
 		rxm->nb_segs = 1;
 		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
 		rxm->packet_type =
 			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
 				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
 				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
 
+		status_err0_qw1 = rx_desc->status_err0_qw1;
+		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+
+		rxm->ol_flags |= pkt_flags;
+
 		rx_pkts[nb_rx++] = rxm;
 	}
 
@@ -1514,6 +1589,48 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S \
+	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))
+
+/* Translate the Rx descriptor status and error fields to pkt flags */
+static inline uint64_t
+idpf_rxd_to_pkt_flags(uint16_t status_error)
+{
+	uint64_t flags = 0;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S)) == 0))
+		return flags;
+
+	if (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
 static inline void
 idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
 		    uint16_t rx_id)
@@ -1547,6 +1664,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *nmb;
 	uint16_t rx_status0;
+	uint64_t pkt_flags;
 	uint64_t dma_addr;
 	uint16_t nb_rx;
 
@@ -1612,10 +1730,14 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->pkt_len = rx_packet_len;
 		rxm->data_len = rx_packet_len;
 		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		pkt_flags = idpf_rxd_to_pkt_flags(rx_status0);
 		rxm->packet_type =
 			ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &
 					    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];
+
+		rxm->ol_flags |= pkt_flags;
+
 		rx_pkts[nb_rx++] = rxm;
 	}
 	rxq->rx_tail = rx_id;
-- 
2.34.1
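
Usage note (illustrative, not part of the patch): the flags set above surface
to applications through each mbuf's ol_flags after rte_eth_rx_burst(). A
minimal sketch, assuming queue 0 and a burst of 32; the function name and the
drop policy are hypothetical:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Read back the per-packet offload results this patch fills in. */
static void
rx_burst_check_offloads(uint16_t port_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb; i++) {
		/* RSS hash: set by idpf_splitq_rx_rss_offload() (split queue model). */
		if ((pkts[i]->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0)
			printf("rss=0x%08x\n", (unsigned int)pkts[i]->hash.rss);

		/* Checksum verdicts: set by idpf_splitq_rx_csum_offload() or
		 * idpf_rxd_to_pkt_flags(); drop packets flagged as bad. */
		if ((pkts[i]->ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD |
					  RTE_MBUF_F_RX_L4_CKSUM_BAD)) != 0) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		/* ... normal processing would go here ... */
		rte_pktmbuf_free(pkts[i]);
	}
}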