From mboxrd@z Thu Jan 1 00:00:00 1970
From: xuan.ding@intel.com
To: thomas@monjalon.net, ferruh.yigit@intel.com, andrew.rybchenko@oktetlabs.ru
Cc: dev@dpdk.org, stephen@networkplumber.org, mb@smartsharesystems.com,
 viacheslavo@nvidia.com, qi.z.zhang@intel.com, ping.yu@intel.com,
 wenxuanx.wu@intel.com, Xuan Ding, Yuan Wang
Subject: [RFC,v2 3/3] net/ice: support header split in Rx data path
Date: Tue, 22 Mar 2022 03:56:29 +0000
Message-Id: <20220322035629.18756-4-xuan.ding@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220322035629.18756-1-xuan.ding@intel.com>
References: <20220303060136.36427-1-xuan.ding@intel.com>
 <20220322035629.18756-1-xuan.ding@intel.com>
List-Id: DPDK patches and discussions

From: Xuan Ding

This patch adds support for header split in the normal Rx data paths.
When an Rx queue is configured with header split for a specific
protocol type, received packets are split directly into a header part
and a payload part, and the two parts are placed into separate
mempools.

Currently, header split is not supported in the vectorized paths.
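To make the expected usage concrete for reviewers, below is a minimal
sketch of how an application could request this offload through the
ethdev API extended earlier in this series. The RTE_ETH_RX_HEADER_SPLIT_*
values and the rxseg 'proto' field are assumed from the companion ethdev
patches of this RFC; the mempools, queue id and descriptor count are
only illustrative.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Configure Rx queue 0 of 'port_id' so that received packets are split
 * after the UDP header: headers land in 'hdr_mp', payload in 'pay_mp'.
 * Both mempools are assumed to be created by the caller.
 */
static int
setup_header_split_queue(uint16_t port_id, struct rte_mempool *hdr_mp,
			 struct rte_mempool *pay_mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	union rte_eth_rxseg rx_seg[2];
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	memset(rx_seg, 0, sizeof(rx_seg));
	rx_seg[0].split.mp = hdr_mp;	/* first segment receives the header */
	rx_seg[0].split.proto = RTE_ETH_RX_HEADER_SPLIT_UDP;
	rx_seg[1].split.mp = pay_mp;	/* second segment receives the payload */

	rxconf = dev_info.default_rxconf;
	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = 2;
	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_HEADER_SPLIT;

	/* The mempool argument is NULL: the pools are taken from rx_seg[]. */
	return rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
				      &rxconf, NULL);
}

Packets returned by rte_eth_rx_burst() on such a queue are two-segment
mbuf chains, header in the first segment and payload in the second,
which is what the ice_recv_pkts()/ice_rx_scan_hw_ring() changes below
produce.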
Signed-off-by: Xuan Ding
Signed-off-by: Yuan Wang
---
 drivers/net/ice/ice_ethdev.c          |  10 +-
 drivers/net/ice/ice_rxtx.c            | 220 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx.h            |  16 ++
 drivers/net/ice/ice_rxtx_vec_common.h |   3 +
 4 files changed, 218 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 1a469afeac..c9762d810d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3707,7 +3707,8 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
 			RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
 			RTE_ETH_RX_OFFLOAD_RSS_HASH |
-			RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+			RTE_ETH_RX_OFFLOAD_TIMESTAMP |
+			RTE_ETH_RX_OFFLOAD_HEADER_SPLIT;
 		dev_info->tx_offload_capa |=
 			RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
 			RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
@@ -3719,7 +3720,7 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
 	}
 
-	dev_info->rx_queue_offload_capa = 0;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_HEADER_SPLIT;
 	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 	dev_info->reta_size = pf->hash_lut_size;
@@ -3788,6 +3789,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
 	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
 
+	dev_info->rx_seg_capa.max_nseg = ICE_RX_MAX_NSEG;
+	dev_info->rx_seg_capa.multi_pools = 1;
+	dev_info->rx_seg_capa.offset_allowed = 0;
+	dev_info->rx_seg_capa.offset_align_log2 = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 4f218bcd0d..fbc88c7473 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -282,7 +282,6 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	/* Set buffer size as the head split is disabled.
 	 */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			      RTE_PKTMBUF_HEADROOM);
-	rxq->rx_hdr_len = 0;
 	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
 	rxq->max_pkt_len = RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
@@ -311,11 +310,51 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+		switch (rxq->rxseg[0].proto) {
+		case RTE_ETH_RX_HEADER_SPLIT_MAC:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;
+			break;
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_MAC:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
+			break;
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_IPV4:
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_IPV6:
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_L3:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
+			break;
+		case RTE_ETH_RX_HEADER_SPLIT_TCP:
+		case RTE_ETH_RX_HEADER_SPLIT_UDP:
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_TCP:
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_UDP:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
+			break;
+		case RTE_ETH_RX_HEADER_SPLIT_SCTP:
+		case RTE_ETH_RX_HEADER_SPLIT_INNER_SCTP:
+			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
+			break;
+		case RTE_ETH_RX_HEADER_SPLIT_NONE:
+			PMD_DRV_LOG(ERR, "Header split protocol must be configured");
+			return -EINVAL;
+		default:
+			PMD_DRV_LOG(ERR, "Header split protocol is not supported");
+			return -EINVAL;
+		}
+		rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;
+	} else {
+		rxq->rx_hdr_len = 0;
+		rx_ctx.dtype = 0; /* No Header Split mode */
+	}
+
 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
 	rx_ctx.qlen = rxq->nb_rx_desc;
 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
-	rx_ctx.dtype = 0; /* No Header Split mode */
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 	rx_ctx.dsize = 1; /* 32B descriptors */
 #endif
@@ -401,6 +440,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union ice_rx_flex_desc *rxd;
+		rxd = &rxq->rx_ring[i];
 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
 
 		if (unlikely(!mbuf)) {
@@ -408,8 +448,6 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 			return -ENOMEM;
 		}
 
-		rte_mbuf_refcnt_set(mbuf, 1);
-		mbuf->next = NULL;
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 		mbuf->nb_segs = 1;
 		mbuf->port = rxq->port_id;
@@ -417,9 +455,32 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
 		dma_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 
-		rxd = &rxq->rx_ring[i];
-		rxd->read.pkt_addr = dma_addr;
-		rxd->read.hdr_addr = 0;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+			struct rte_mbuf *mbuf_pay;
+			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
+			if (unlikely(!mbuf_pay)) {
+				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
+				return -ENOMEM;
+			}
+
+			mbuf_pay->next = NULL;
+			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
+			mbuf_pay->nb_segs = 1;
+			mbuf_pay->port = rxq->port_id;
+			mbuf->next = mbuf_pay;
+
+			rxd->read.hdr_addr = dma_addr;
+			/* The LS bit should be set to zero regardless of
+			 * header split enablement.
+			 */
+			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
+		} else {
+			rte_mbuf_refcnt_set(mbuf, 1);
+			mbuf->next = NULL;
+			rxd->read.hdr_addr = 0;
+			rxd->read.pkt_addr = dma_addr;
+		}
+
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
 		rxd->read.rsvd1 = 0;
 		rxd->read.rsvd2 = 0;
@@ -443,14 +504,14 @@ _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
 
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		if (rxq->sw_ring[i].mbuf) {
-			rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
 			rxq->sw_ring[i].mbuf = NULL;
 		}
 	}
 	if (rxq->rx_nb_avail == 0)
 		return;
 	for (i = 0; i < rxq->rx_nb_avail; i++)
-		rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
+		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);
 
 	rxq->rx_nb_avail = 0;
 }
@@ -1076,6 +1137,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
+	uint16_t n_seg = rx_conf->rx_nseg;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1087,6 +1149,22 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
+	if (mp)
+		n_seg = 1;
+
+	if (n_seg > 1) {
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)) {
+			PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
+					dev->data->port_id, queue_idx);
+			return -EINVAL;
+		}
+		if (n_seg > ICE_RX_MAX_NSEG) {
+			PMD_INIT_LOG(ERR, "port %u queue index %u split seg exceed maximum",
+					dev->data->port_id, queue_idx);
+			return -EINVAL;
+		}
+	}
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1098,12 +1176,22 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 				 sizeof(struct ice_rx_queue),
 				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
+
 	if (!rxq) {
 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
 			     "rx queue data structure");
 		return -ENOMEM;
 	}
-	rxq->mp = mp;
+
+	rxq->rxseg_nb = n_seg;
+	if (n_seg > 1) {
+		rte_memcpy(rxq->rxseg, rx_conf->rx_seg,
+			   sizeof(struct rte_eth_rxseg_split) * n_seg);
+		rxq->mp = rxq->rxseg[0].mp;
+	} else {
+		rxq->mp = mp;
+	}
+
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
@@ -1568,7 +1656,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t stat_err0;
-	uint16_t pkt_len;
+	uint16_t pkt_len, hdr_len;
 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags = 0;
@@ -1616,6 +1704,24 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
+
+			if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+				mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);
+				mb->next->next = NULL;
+				hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &
+						ICE_RX_FLEX_DESC_HEADER_LEN_M;
+				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+				mb->data_len = hdr_len;
+				mb->pkt_len = hdr_len + pkt_len;
+				mb->next->data_len = pkt_len;
+			} else {
+				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+				mb->data_len = pkt_len;
+				mb->pkt_len = pkt_len;
+			}
+
 			mb->ol_flags = 0;
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
@@ -1695,7 +1801,9 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	struct rte_mbuf *mb;
 	uint16_t alloc_idx, i;
 	uint64_t dma_addr;
-	int diag;
+	int diag, diag_pay;
+	uint64_t pay_addr;
+	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1708,6 +1816,15 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 		return -ENOMEM;
 	}
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
+				(void *)mbufs_pay, rxq->rx_free_thresh);
+		if (unlikely(diag_pay != 0)) {
+			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
+			return -ENOMEM;
+		}
+	}
+
 	rxdp = &rxq->rx_ring[alloc_idx];
 	for (i = 0; i < rxq->rx_free_thresh; i++) {
 		if (likely(i < (rxq->rx_free_thresh - 1)))
@@ -1716,13 +1833,21 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 		mb = rxep[i].mbuf;
 		rte_mbuf_refcnt_set(mb, 1);
-		mb->next = NULL;
 		mb->data_off = RTE_PKTMBUF_HEADROOM;
 		mb->nb_segs = 1;
 		mb->port = rxq->port_id;
 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
-		rxdp[i].read.hdr_addr = 0;
-		rxdp[i].read.pkt_addr = dma_addr;
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+			mb->next = mbufs_pay[i];
+			pay_addr = rte_mbuf_data_iova_default(mbufs_pay[i]);
+			rxdp[i].read.hdr_addr = dma_addr;
+			rxdp[i].read.pkt_addr = rte_cpu_to_le_64(pay_addr);
+		} else {
+			mb->next = NULL;
+			rxdp[i].read.hdr_addr = 0;
+			rxdp[i].read.pkt_addr = dma_addr;
+		}
 	}
 
 	/* Update Rx tail register */
@@ -2315,11 +2440,13 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
 	struct ice_rx_entry *rxe;
 	struct rte_mbuf *nmb; /* new allocated mbuf */
+	struct rte_mbuf *nmb_pay; /* new allocated payload mbuf */
 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
 	uint16_t rx_id = rxq->rx_tail;
 	uint16_t nb_rx = 0;
 	uint16_t nb_hold = 0;
 	uint16_t rx_packet_len;
+	uint16_t rx_header_len;
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
@@ -2342,12 +2469,16 @@ ice_recv_pkts(void *rx_queue,
 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
 			break;
 
-		/* allocate mbuf */
+		if (rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S))
+			break;
+
+		/* allocate header mbuf */
 		nmb = rte_mbuf_raw_alloc(rxq->mp);
 		if (unlikely(!nmb)) {
 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
 			break;
 		}
+
 		rxd = *rxdp; /* copy descriptor in ring to temp variable*/
 
 		nb_hold++;
@@ -2360,24 +2491,55 @@ ice_recv_pkts(void *rx_queue,
 		dma_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
-		/**
-		 * fill the read format of descriptor with physic address in
-		 * new allocated mbuf: nmb
-		 */
-		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr = dma_addr;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+			/* allocate payload mbuf */
+			nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
+			if (unlikely(!nmb_pay)) {
+				rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
+				break;
+			}
+
+			nmb->next = nmb_pay;
+			nmb_pay->next = NULL;
 
-		/* calculate rx_packet_len of the received pkt */
-		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
-				 ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			/**
+			 * fill the read format of descriptor with physic address in
+			 * new allocated mbuf: nmb
+			 */
+			rxdp->read.hdr_addr = dma_addr;
+			rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));
+		} else {
+			/**
+			 * fill the read format of descriptor with physic address in
+			 * new allocated mbuf: nmb
+			 */
+			rxdp->read.hdr_addr = 0;
+			rxdp->read.pkt_addr = dma_addr;
+		}
 
 		/* fill old mbuf with received descriptor: rxd */
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
-		rxm->pkt_len = rx_packet_len;
-		rxm->data_len = rx_packet_len;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT) {
+			rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);
+			rxm->next->next = NULL;
+			/* calculate rx_packet_len of the received pkt */
+			rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &
+					ICE_RX_FLEX_DESC_HEADER_LEN_M;
+			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			rxm->data_len = rx_header_len;
+			rxm->pkt_len = rx_header_len + rx_packet_len;
+			rxm->next->data_len = rx_packet_len;
+		} else {
+			rxm->nb_segs = 1;
+			rxm->next = NULL;
+			/* calculate rx_packet_len of the received pkt */
+			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			rxm->data_len = rx_packet_len;
+			rxm->pkt_len = rx_packet_len;
+		}
 		rxm->port = rxq->port_id;
 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index bb18a01951..611dbc8503 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -16,6 +16,9 @@
 #define ICE_RX_MAX_BURST 32
 #define ICE_TX_MAX_BURST 32
 
+/* Maximal number of segments to split. */
+#define ICE_RX_MAX_NSEG 2
+
 #define ICE_CHK_Q_ENA_COUNT        100
 #define ICE_CHK_Q_ENA_INTERVAL_US  100
 
@@ -43,6 +46,11 @@
 extern uint64_t ice_timestamp_dynflag;
 extern int ice_timestamp_dynfield_offset;
 
+/* Max header size can be 2K - 64 bytes */
+#define ICE_RX_HDR_BUF_SIZE    (2048 - 64)
+
+#define ICE_HEADER_SPLIT_ENA   BIT(0)
+
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
@@ -53,6 +61,12 @@ struct ice_rx_entry {
 	struct rte_mbuf *mbuf;
 };
 
+enum ice_rx_dtype {
+	ICE_RX_DTYPE_NO_SPLIT		= 0,
+	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
+	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
+};
+
 struct ice_rx_queue {
 	struct rte_mempool *mp; /* mbuf pool to populate RX ring */
 	volatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */
@@ -95,6 +109,8 @@ struct ice_rx_queue {
 	uint32_t time_high;
 	uint32_t hw_register_set;
 	const struct rte_memzone *mz;
+	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
+	uint32_t rxseg_nb;
 };
 
 struct ice_tx_entry {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 8ff01046e1..db394ceca8 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -290,6 +290,9 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 		return -1;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
+		return -1;
+
 	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
 		return ICE_VECTOR_OFFLOAD_PATH;
 
-- 
2.17.1