From: Andre Muezerie
To: dev@dpdk.org
Cc: Konstantin Ananyev
Subject: [PATCH v10 11/21] net/ice: remove use of VLAs
Date: Tue, 19 Nov 2024 19:13:11 -0800
Message-Id: <1732072401-15962-12-git-send-email-andremue@linux.microsoft.com>
In-Reply-To: <1732072401-15962-1-git-send-email-andremue@linux.microsoft.com>
References: <1713397319-26135-1-git-send-email-roretzla@linux.microsoft.com>
 <1732072401-15962-1-git-send-email-andremue@linux.microsoft.com>

From: Konstantin Ananyev

../drivers/net/ice/ice_rxtx.c:1871:29: warning: variable length array used [-Wvla]

Here the VLA is used as a temporary array for mbufs that will be used as
split RX data buffers. Since at any given time only one thread can do RX
on a particular queue, we can allocate extra space for that array at
rx_queue_setup() and then safely use it in the RX fast path.

Signed-off-by: Konstantin Ananyev
Acked-by: Anatoly Burakov
---
Note: a simplified, standalone sketch of this VLA-removal pattern is
appended after the patch for illustration.

 drivers/net/ice/ice_rxtx.c | 18 ++++++++++++------
 drivers/net/ice/ice_rxtx.h |  2 ++
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0c7106c7e0..578453ec89 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1186,7 +1186,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct ice_rx_queue *rxq;
 	const struct rte_memzone *rz;
-	uint32_t ring_size;
+	uint32_t ring_size, tlen;
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
@@ -1294,9 +1294,14 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* always reserve more for bulk alloc */
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
 
+	/* allocate extra entries for SW split buffer */
+	tlen = ((rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0) ?
+		rxq->rx_free_thresh : 0;
+	tlen += len;
+
 	/* Allocate the software ring. */
 	rxq->sw_ring = rte_zmalloc_socket(NULL,
-					  sizeof(struct ice_rx_entry) * len,
+					  sizeof(struct ice_rx_entry) * tlen,
 					  RTE_CACHE_LINE_SIZE,
 					  socket_id);
 	if (!rxq->sw_ring) {
@@ -1305,6 +1310,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->sw_split_buf = (tlen == len) ? NULL : rxq->sw_ring + len;
+
 	ice_reset_rx_queue(rxq);
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1883,7 +1890,6 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	uint64_t dma_addr;
 	int diag, diag_pay;
 	uint64_t pay_addr;
-	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1898,7 +1904,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
-				(void *)mbufs_pay, rxq->rx_free_thresh);
+				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
 			return -ENOMEM;
@@ -1923,8 +1929,8 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 			rxdp[i].read.hdr_addr = 0;
 			rxdp[i].read.pkt_addr = dma_addr;
 		} else {
-			mb->next = mbufs_pay[i];
-			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
+			mb->next = rxq->sw_split_buf[i].mbuf;
+			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb->next));
 			rxdp[i].read.hdr_addr = dma_addr;
 			rxdp[i].read.pkt_addr = pay_addr;
 		}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 45f25b3609..20ee325c2b 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -139,6 +139,8 @@ struct ice_rx_queue {
 	uint32_t hw_time_high; /* high 32 bits of timestamp */
 	uint32_t hw_time_low; /* low 32 bits of timestamp */
 	uint64_t hw_time_update; /* SW time of HW record updating */
+	struct ice_rx_entry *sw_split_buf;
+	/* address of temp buffer for RX split mbufs */
 	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
 	uint32_t rxseg_nb;
 	bool ts_enable; /* if rxq timestamp is enabled */
-- 
2.47.0.vfs.0.3
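
For illustration only, below is a minimal, self-contained sketch of the
pattern the patch applies: the per-burst scratch array for payload mbufs is
carved out of memory allocated once at queue setup instead of being declared
as a VLA in the fast path. All names here (demo_rx_entry, demo_rxq,
demo_queue_setup, demo_alloc_bufs) are hypothetical and are not part of the
ice driver or the DPDK API; plain calloc()/memset() stand in for
rte_zmalloc_socket() and the mempool bulk get.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_rx_entry {
	void *mbuf;	/* stand-in for struct rte_mbuf * */
};

struct demo_rxq {
	struct demo_rx_entry *sw_ring;		/* descriptor entries + burst slack */
	struct demo_rx_entry *sw_split_buf;	/* scratch area for split payload mbufs */
	uint16_t nb_desc;
	uint16_t rx_free_thresh;
	int split_enabled;
};

/*
 * Setup path: runs once per queue, so allocating here is fine.  The scratch
 * area is simply extra entries appended to the software-ring allocation.
 */
static int
demo_queue_setup(struct demo_rxq *rxq, uint16_t nb_desc, uint16_t burst_slack)
{
	uint32_t len = (uint32_t)nb_desc + burst_slack;
	uint32_t tlen = len + (rxq->split_enabled ? rxq->rx_free_thresh : 0);

	rxq->sw_ring = calloc(tlen, sizeof(*rxq->sw_ring));
	if (rxq->sw_ring == NULL)
		return -1;

	/* Point the scratch buffer at the extra tail entries (or NULL). */
	rxq->sw_split_buf = (tlen == len) ? NULL : rxq->sw_ring + len;
	rxq->nb_desc = nb_desc;
	return 0;
}

/*
 * Fast path: runs on every burst, so it must not allocate and must not use a
 * VLA.  Because only one thread polls a given queue, reusing the per-queue
 * scratch entries is race-free.
 */
static void
demo_alloc_bufs(struct demo_rxq *rxq)
{
	if (rxq->sw_split_buf != NULL) {
		/*
		 * In the real driver this is where payload mbufs are fetched
		 * in bulk into the scratch entries; a memset stands in for
		 * that work here.
		 */
		memset(rxq->sw_split_buf, 0,
		       (size_t)rxq->rx_free_thresh * sizeof(*rxq->sw_split_buf));
	}
}

int
main(void)
{
	struct demo_rxq rxq = { .rx_free_thresh = 32, .split_enabled = 1 };

	if (demo_queue_setup(&rxq, 512, 32) != 0)
		return 1;

	demo_alloc_bufs(&rxq);	/* safe to call per burst, no VLA involved */
	free(rxq.sw_ring);
	return 0;
}

The property this sketch tries to show is the one the commit message relies
on: demo_queue_setup() is slow-path code where allocation is acceptable,
while demo_alloc_bufs() stays allocation-free, and sizing the scratch area by
rx_free_thresh mirrors the bulk-allocation granularity used in the patch.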