From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>, Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH v14 15/18] net/idpf: add support for Rx offloading
Date: Thu, 27 Oct 2022 15:47:26 +0800
Message-Id: <20221027074729.1494529-16-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20221027074729.1494529-1-junfeng.guo@intel.com>
References: <20221027054505.1369248-2-junfeng.guo@intel.com>
 <20221027074729.1494529-1-junfeng.guo@intel.com>

Add Rx offloading support:
 - support CHKSUM and RSS offload for split queue model
 - support CHKSUM offload for single queue model

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 doc/guides/nics/features/idpf.ini |   5 ++
 drivers/net/idpf/idpf_ethdev.c    |   6 ++
 drivers/net/idpf/idpf_rxtx.c      | 123 ++++++++++++++++++++++++++++++
 3 files changed, 134 insertions(+)

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index c8f4a0d6ed..6eefac9529 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -3,9 +3,14 @@
 ;
 ; Refer to default.ini for the full list of available PMD features.
 ;
+; A feature marked with "P" is only supported when the non-vector
+; path is selected.
+;
 [Features]
 Queue start/stop     = Y
 MTU update           = Y
+L3 checksum offload  = P
+L4 checksum offload  = P
 Linux                = Y
 x86-32               = Y
 x86-64               = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index ea7d3bfd7e..88f3db3267 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -89,6 +89,12 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 	dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
 
+	dev_info->rx_offload_capa =
+		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+		RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+		RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index a980714060..f15e61a785 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1209,6 +1209,73 @@ idpf_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S				\
+	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+	uint64_t flags = 0;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+		return flags;
+
+	if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+			   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+	uint8_t status_err0_qw0;
+	uint64_t flags = 0;
+
+	status_err0_qw0 = rx_desc->status_err0_qw0;
+
+	if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+		flags |= RTE_MBUF_F_RX_RSS_HASH;
+		mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+				IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+			       ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+				IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+			       ((uint32_t)(rx_desc->hash3) <<
+				IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+	}
+
+	return flags;
+}
+
 static void
 idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
 {
@@ -1282,9 +1349,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t pktlen_gen_bufq_id;
 	struct idpf_rx_queue *rxq;
 	const uint32_t *ptype_tbl;
+	uint8_t status_err0_qw1;
 	struct rte_mbuf *rxm;
 	uint16_t rx_id_bufq1;
 	uint16_t rx_id_bufq2;
+	uint64_t pkt_flags;
 	uint16_t pkt_len;
 	uint16_t bufq_id;
 	uint16_t gen_id;
@@ -1349,11 +1418,18 @@
 		rxm->next = NULL;
 		rxm->nb_segs = 1;
 		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
 		rxm->packet_type =
 			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
 				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
 				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
 
+		status_err0_qw1 = rx_desc->status_err0_qw1;
+		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+
+		rxm->ol_flags |= pkt_flags;
+
 		rx_pkts[nb_rx++] = rxm;
 	}
 
@@ -1513,6 +1589,48 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+#define IDPF_RX_FLEX_DESC_STATUS0_XSUM_S				\
+	(RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
+	 RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))
+
+/* Translate the Rx descriptor status and error fields to pkt flags */
+static inline uint64_t
+idpf_rxd_to_pkt_flags(uint16_t status_error)
+{
+	uint64_t flags = 0;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S)) == 0))
+		return flags;
+
+	if (likely((status_error & IDPF_RX_FLEX_DESC_STATUS0_XSUM_S) == 0)) {
+		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0))
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+	if (unlikely((status_error & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)) != 0))
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+	else
+		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
 static inline void
 idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold,
 		    uint16_t rx_id)
@@ -1546,6 +1664,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *nmb;
 	uint16_t rx_status0;
+	uint64_t pkt_flags;
 	uint64_t dma_addr;
 	uint16_t nb_rx;
 
@@ -1611,10 +1730,14 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->pkt_len = rx_packet_len;
 		rxm->data_len = rx_packet_len;
 		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		pkt_flags = idpf_rxd_to_pkt_flags(rx_status0);
 		rxm->packet_type =
 			ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxd.flex_nic_wb.ptype_flex_flags0) &
 					    VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)];
 
+		rxm->ol_flags |= pkt_flags;
+
 		rx_pkts[nb_rx++] = rxm;
 	}
 	rxq->rx_tail = rx_id;
-- 
2.34.1
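
For reviewers who want to exercise the new flags, below is a minimal
application-side sketch. It is not part of the patch: the function name
example_rx_offload_poll(), the single Rx/Tx queue configuration and
EXAMPLE_BURST_SZ are assumptions made for illustration. It enables the
Rx checksum offloads advertised in idpf_dev_info_get() and then checks
the mbuf flags filled in by idpf_splitq_rx_csum_offload() /
idpf_splitq_rx_rss_offload() (split queue) or idpf_rxd_to_pkt_flags()
(single queue):

	#include <stdio.h>

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define EXAMPLE_BURST_SZ 32	/* illustrative burst size */

	/* Hypothetical helper: configure the Rx checksum offloads, then
	 * poll one burst and act on the resulting per-packet flags.
	 */
	static void
	example_rx_offload_poll(uint16_t port_id)
	{
		struct rte_eth_conf port_conf = {
			.rxmode = {
				/* Capabilities reported in idpf_dev_info_get() */
				.offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
					    RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
					    RTE_ETH_RX_OFFLOAD_TCP_CKSUM,
			},
		};
		struct rte_mbuf *pkts[EXAMPLE_BURST_SZ];
		uint16_t i, nb;

		/* One Rx and one Tx queue; queue setup and device start
		 * are elided here.
		 */
		if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
			return;

		nb = rte_eth_rx_burst(port_id, 0, pkts, EXAMPLE_BURST_SZ);
		for (i = 0; i < nb; i++) {
			/* Checksum verdict written by the PMD on Rx. */
			if ((pkts[i]->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			    RTE_MBUF_F_RX_IP_CKSUM_BAD) {
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
			/* hash.rss is valid only when the flag is set
			 * (split queue model only in this patch).
			 */
			if ((pkts[i]->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0)
				printf("pkt %u rss hash: 0x%08x\n", i,
				       (unsigned int)pkts[i]->hash.rss);
			rte_pktmbuf_free(pkts[i]);
		}
	}

Note that RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM could be enabled the same
way for tunneled traffic; the outer verdicts then show up as the
RTE_MBUF_F_RX_OUTER_* flags set in the functions above.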