From: wenxuanx.wu@intel.com
To: dev@dpdk.org, qi.z.zhang@intel.com, jerinjacobk@gmail.com,
	xiaoyun.li@intel.com, aman.deep.singh@intel.com, yuying.zhang@intel.com
Cc: stephen@networkplumber.org, mb@smartsharesystems.com,
	viacheslavo@nvidia.com, ping.yu@intel.com, xuan.ding@intel.com,
	yuanx.wang@intel.com, wenxuanx.wu@intel.com
Subject: [PATCH v7 2/3] net/ice: support buffer split in Rx path
Date: Wed, 1 Jun 2022 13:06:13 +0000
Message-Id: <20220601130614.955111-3-wenxuanx.wu@intel.com>
In-Reply-To: <20220601130614.955111-1-wenxuanx.wu@intel.com>
References: <20220303060136.36427-1-xuan.ding@intel.com>
	<20220601130614.955111-1-wenxuanx.wu@intel.com>

From: Wenxuan Wu <wenxuanx.wu@intel.com>

This patch adds support for protocol-based buffer split in the normal Rx
data paths. When an Rx queue is configured with a specific protocol type,
each received packet is split into a protocol header part and a payload
part, and the two parts are placed into separate mempools.

Protocol-based buffer split is currently not supported in the vectorized
paths.
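Below is a minimal usage sketch from the application's point of view
(illustrative only, not part of this patch). It assumes the proto_hdr
field that patch 1/3 of this series adds to struct rte_eth_rxseg_split,
and that port_id, queue_id, nb_rxd, socket_id and the two mempools
hdr_pool/pay_pool are set up by the caller:

	union rte_eth_rxseg rx_seg[2];
	struct rte_eth_rxconf rxconf;
	int ret;

	memset(rx_seg, 0, sizeof(rx_seg));
	memset(&rxconf, 0, sizeof(rxconf));

	/* Segment 0 receives the protocol headers, split after the UDP header. */
	rx_seg[0].split.mp = hdr_pool;
	rx_seg[0].split.proto_hdr = RTE_PTYPE_L4_UDP;

	/* Segment 1 receives the remaining payload. */
	rx_seg[1].split.mp = pay_pool;

	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = 2;

	/* mb_pool is NULL: buffers come from the per-segment mempools above. */
	ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd, socket_id,
				     &rxconf, NULL);

With such a configuration the PMD programs the queue for split at the L4
boundary, so each received packet arrives as a two-segment mbuf chain:
headers in hdr_pool, payload in pay_pool.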
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
Signed-off-by: Wenxuan Wu <wenxuanx.wu@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/ice/ice_ethdev.c          |  10 +-
 drivers/net/ice/ice_rxtx.c            | 220 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx.h            |  16 ++
 drivers/net/ice/ice_rxtx_vec_common.h |   3 +
 lib/ethdev/rte_ethdev.c               |   2 +-
 5 files changed, 218 insertions(+), 33 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 73e550f5fb..ce3f49c863 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3713,7 +3713,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
 			RTE_ETH_RX_OFFLOAD_RSS_HASH |
-			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 		dev_info->tx_offload_capa |=
 			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
@@ -3725,7 +3726,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
-	dev_info->rx_queue_offload_capa = 0;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
@@ -3794,6 +3795,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
 
+	dev_info->rx_seg_capa.max_nseg = ICE_RX_MAX_NSEG;
+	dev_info->rx_seg_capa.multi_pools = 1;
+	dev_info->rx_seg_capa.offset_allowed = 0;
+	dev_info->rx_seg_capa.offset_align_log2 = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2dd2637fbb..77ab258f7f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -282,7 +282,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
 			      RTE_PKTMBUF_HEADROOM);
-	rxq->rx_hdr_len = 0;
 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
 	rxq->max_pkt_len =
 		RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
@@ -311,11 +310,53 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		switch (rxq->rxseg[0].proto_hdr) {
+		case RTE_PTYPE_L2_ETHER:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;
+			break;
+		case RTE_PTYPE_INNER_L2_ETHER:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
+			break;
+		case RTE_PTYPE_L3_IPV4:
+		case RTE_PTYPE_L3_IPV6:
+		case RTE_PTYPE_INNER_L3_IPV4:
+		case RTE_PTYPE_INNER_L3_IPV6:
+		case RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
+			break;
+		case RTE_PTYPE_L4_TCP:
+		case RTE_PTYPE_L4_UDP:
+		case RTE_PTYPE_INNER_L4_TCP:
+		case RTE_PTYPE_INNER_L4_UDP:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
+			break;
+		case RTE_PTYPE_L4_SCTP:
+		case RTE_PTYPE_INNER_L4_SCTP:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
+			break;
+		case 0:
+			PMD_DRV_LOG(ERR, "Buffer split protocol must be configured");
+			return -EINVAL;
+		default:
+			PMD_DRV_LOG(ERR, "Buffer split protocol is not supported");
+			return -EINVAL;
+		}
+		rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;
+	} else {
+		rxq->rx_hdr_len = 0;
+		rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */
+	}
+
 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	rx_ctx.qlen = rxq->nb_rx_desc;
 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
-	rx_ctx.dtype = 0; /* No Header Split mode */
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 	rx_ctx.dsize = 1; /* 32B descriptors */
 #endif
@@ -401,6 +442,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union ice_rx_flex_desc *rxd;
+		rxd = &rxq->rx_ring[i];
 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
 
 		if (unlikely(!mbuf)) {
@@ -408,8 +450,6 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
 			return -ENOMEM;
 		}
-		rte_mbuf_refcnt_set(mbuf, 1);
-		mbuf->next = NULL;
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 		mbuf->nb_segs = 1;
 		mbuf->port = rxq->port_id;
@@ -417,9 +457,33 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 		dma_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
-		rxd = &rxq->rx_ring[i];
-		rxd->read.pkt_addr = dma_addr;
-		rxd->read.hdr_addr = 0;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			struct rte_mbuf *mbuf_pay;
+			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
+			if (unlikely(!mbuf_pay)) {
+				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
+				return -ENOMEM;
+			}
+
+			mbuf_pay->next = NULL;
+			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
+			mbuf_pay->nb_segs = 1;
+			mbuf_pay->port = rxq->port_id;
+			mbuf->next = mbuf_pay;
+
+			rxd->read.hdr_addr = dma_addr;
+			/* The LS bit should be set to zero regardless of
+			 * buffer split enablement.
+			 */
+			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
+		} else {
+			rte_mbuf_refcnt_set(mbuf, 1);
+			mbuf->next = NULL;
+			rxd->read.hdr_addr = 0;
+			rxd->read.pkt_addr = dma_addr;
+		}
+
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 		rxd->read.rsvd1 = 0;
 		rxd->read.rsvd2 = 0;
@@ -443,14 +507,14 @@ _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		if (rxq->sw_ring[i].mbuf) {
-			rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
 			rxq->sw_ring[i].mbuf = NULL;
 		}
 	}
 	if (rxq->rx_nb_avail == 0)
 		return;
 	for (i = 0; i < rxq->rx_nb_avail; i++)
-		rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
+		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);
 
 	rxq->rx_nb_avail = 0;
 }
@@ -742,7 +806,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	rx_ctx.qlen = rxq->nb_rx_desc;
 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
-	rx_ctx.dtype = 0; /* No Header Split mode */
+	rx_ctx.dtype = 0; /* No Buffer Split mode */
 	rx_ctx.dsize = 1; /* 32B descriptors */
 	rx_ctx.rxmax = ICE_ETH_MAX_LEN;
 	/* TPH: Transaction Layer Packet (TLP) processing hints */
@@ -1076,6 +1140,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
+	uint16_t n_seg = rx_conf->rx_nseg;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1087,6 +1152,17 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
+	if (mp)
+		n_seg = 1;
+
+	if (n_seg > 1) {
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
+					dev->data->port_id, queue_idx);
+			return -EINVAL;
+		}
+	}
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1098,12 +1174,22 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 				   sizeof(struct ice_rx_queue),
 				   RTE_CACHE_LINE_SIZE,
 				   socket_id);
+
 	if (!rxq) {
 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
 			     "rx queue data structure");
 		return -ENOMEM;
 	}
-	rxq->mp = mp;
+
+	rxq->rxseg_nb = n_seg;
+	if (n_seg > 1) {
+		rte_memcpy(rxq->rxseg, rx_conf->rx_seg,
+			   sizeof(struct rte_eth_rxseg_split) * n_seg);
+		rxq->mp = rxq->rxseg[0].mp;
+	} else {
+		rxq->mp = mp;
+	}
+
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
@@ -1568,7 +1654,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t stat_err0;
-	uint16_t pkt_len;
+	uint16_t pkt_len, hdr_len;
 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags = 0;
@@ -1623,6 +1709,24 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
+
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+				mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);
+				mb->next->next = NULL;
+				hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &
+						ICE_RX_FLEX_DESC_HEADER_LEN_M;
+				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+						ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+				mb->data_len = hdr_len;
+				mb->pkt_len = hdr_len + pkt_len;
+				mb->next->data_len = pkt_len;
+			} else {
+				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+						ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+				mb->data_len = pkt_len;
+				mb->pkt_len = pkt_len;
+			}
+
 			mb->ol_flags = 0;
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
@@ -1714,7 +1818,9 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t alloc_idx, i;
 	uint64_t dma_addr;
-	int diag;
+	int diag, diag_pay;
+	uint64_t pay_addr;
+	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1727,6 +1833,15 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 		return -ENOMEM;
 	}
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
+				(void *)mbufs_pay, rxq->rx_free_thresh);
+		if (unlikely(diag_pay != 0)) {
+			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
+			return -ENOMEM;
+		}
+	}
+
 	rxdp = &rxq->rx_ring[alloc_idx];
 	for (i = 0; i < rxq->rx_free_thresh; i++) {
 		if (likely(i < (rxq->rx_free_thresh - 1)))
@@ -1735,13 +1850,21 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 		mb = rxep[i].mbuf;
 		rte_mbuf_refcnt_set(mb, 1);
-		mb->next = NULL;
 		mb->data_off = RTE_PKTMBUF_HEADROOM;
 		mb->nb_segs = 1;
 		mb->port = rxq->port_id;
 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
-		rxdp[i].read.hdr_addr = 0;
-		rxdp[i].read.pkt_addr = dma_addr;
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			mb->next = mbufs_pay[i];
+			pay_addr = rte_mbuf_data_iova_default(mbufs_pay[i]);
+			rxdp[i].read.hdr_addr = dma_addr;
+			rxdp[i].read.pkt_addr = rte_cpu_to_le_64(pay_addr);
+		} else {
+			mb->next = NULL;
+			rxdp[i].read.hdr_addr = 0;
+			rxdp[i].read.pkt_addr = dma_addr;
+		}
 	}
 
 	/* Update Rx tail register */
@@ -2350,11 +2473,13 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
 	struct ice_rx_entry *rxe;
 	struct rte_mbuf *nmb; /* new allocated mbuf */
+	struct rte_mbuf *nmb_pay; /* new allocated payload mbuf */
 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
 	uint16_t rx_id = rxq->rx_tail;
 	uint16_t nb_rx = 0;
 	uint16_t nb_hold = 0;
 	uint16_t rx_packet_len;
+	uint16_t rx_header_len;
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
@@ -2382,12 +2507,16 @@ ice_recv_pkts(void *rx_queue,
 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
 			break;
 
-		/* allocate mbuf */
+		if (rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S))
+			break;
+
+		/* allocate header mbuf */
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb)) {
 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
 			break;
 		}
+
 		rxd = *rxdp; /* copy descriptor in ring to temp variable*/
 
 		nb_hold++;
@@ -2400,24 +2529,55 @@ ice_recv_pkts(void *rx_queue,
 		dma_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
-		/**
-		 * fill the read format of descriptor with physic address in
-		 * new allocated mbuf: nmb
-		 */
-		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr = dma_addr;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			/* allocate payload mbuf */
+			nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
+			if (unlikely(!nmb_pay)) {
+				rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
+				break;
+			}
+
+			nmb->next = nmb_pay;
+			nmb_pay->next = NULL;
-		/* calculate rx_packet_len of the received pkt */
-		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
-				 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+
+			/**
+			 * fill the read format of the descriptor with the physical
+			 * addresses of the newly allocated mbufs: nmb and nmb_pay
+			 */
+			rxdp->read.hdr_addr = dma_addr;
+			rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));
+		} else {
+			/**
+			 * fill the read format of the descriptor with the physical
+			 * address of the newly allocated mbuf: nmb
+			 */
+			rxdp->read.hdr_addr = 0;
+			rxdp->read.pkt_addr = dma_addr;
+		}
 
 		/* fill old mbuf with received descriptor: rxd */
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
-		rxm->pkt_len = rx_packet_len;
-		rxm->data_len = rx_packet_len;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+			rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);
+			rxm->next->next = NULL;
+			/* calculate rx_header_len and rx_packet_len of the received pkt */
+			rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &
+					ICE_RX_FLEX_DESC_HEADER_LEN_M;
+			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			rxm->data_len = rx_header_len;
+			rxm->pkt_len = rx_header_len + rx_packet_len;
+			rxm->next->data_len = rx_packet_len;
+		} else {
+			rxm->nb_segs = 1;
+			rxm->next = NULL;
+			/* calculate rx_packet_len of the received pkt */
+			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			rxm->data_len = rx_packet_len;
+			rxm->pkt_len = rx_packet_len;
+		}
 		rxm->port = rxq->port_id;
 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index bb18a01951..611dbc8503 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -16,6 +16,9 @@
 #define ICE_RX_MAX_BURST 32
 #define ICE_TX_MAX_BURST 32
 
+/* Maximum number of segments to split a packet into. */
+#define ICE_RX_MAX_NSEG 2
+
 #define ICE_CHK_Q_ENA_COUNT        100
 #define ICE_CHK_Q_ENA_INTERVAL_US  100
 
@@ -43,6 +46,11 @@
 extern uint64_t ice_timestamp_dynflag;
 extern int ice_timestamp_dynfield_offset;
 
+/* Max header buffer size is 2K - 64 bytes */
+#define ICE_RX_HDR_BUF_SIZE	(2048 - 64)
+
+#define ICE_HEADER_SPLIT_ENA	BIT(0)
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
@@ -53,6 +61,12 @@ struct ice_rx_entry {
 	struct rte_mbuf *mbuf;
 };
 
+enum ice_rx_dtype {
+	ICE_RX_DTYPE_NO_SPLIT = 0,
+	ICE_RX_DTYPE_HEADER_SPLIT = 1,
+	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
+};
+
 struct ice_rx_queue {
 	struct rte_mempool *mp; /* mbuf pool to populate RX ring */
 	volatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */
@@ -95,6 +109,8 @@ struct ice_rx_queue {
 	uint32_t time_high;
 	uint32_t hw_register_set;
 	const struct rte_memzone *mz;
+	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
+	uint32_t rxseg_nb;
 };
 
 struct ice_tx_entry {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 2dd2d83650..eec6ea2134 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -291,6 +291,9 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
+		return -1;
+
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
 		return ICE_VECTOR_OFFLOAD_PATH;
 
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index be161ff999..fbd55cdd9d 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -1707,7 +1707,7 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
 		}
 	} else {
 		/* Split after specified protocol header. */
-		if (proto_hdr & RTE_BUFFER_SPLIT_PROTO_HDR_MASK) {
+		if (!(proto_hdr & RTE_BUFFER_SPLIT_PROTO_HDR_MASK)) {
 			RTE_ETHDEV_LOG(ERR,
 				"Protocol header %u not supported)\n",
 				proto_hdr);
-- 
2.25.1