From: Andre Muezerie
To: dev@dpdk.org
Cc: Konstantin Ananyev
Subject: [PATCH v5 11/20] net/ice: remove use of VLAs
Date: Thu, 7 Nov 2024 16:44:42 -0800
Message-Id: <1731026691-1529-12-git-send-email-andremue@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1731026691-1529-1-git-send-email-andremue@linux.microsoft.com>
References: <1713397319-26135-1-git-send-email-roretzla@linux.microsoft.com>
 <1731026691-1529-1-git-send-email-andremue@linux.microsoft.com>

From: Konstantin Ananyev

../drivers/net/ice/ice_rxtx.c:1871:29: warning: variable length array used [-Wvla]

Here the VLA is used as a temporary array for the mbufs that will be used
as split RX data buffers. Since at any given time only one thread can do
RX on a particular queue, we can allocate extra space for that array at
rx_queue_setup() time and then safely use it in the RX fast path.

Signed-off-by: Konstantin Ananyev
---
 drivers/net/ice/ice_rxtx.c | 18 ++++++++++++------
 drivers/net/ice/ice_rxtx.h |  2 ++
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 024d97cb46..a52a759031 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1176,7 +1176,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct ice_rx_queue *rxq;
 	const struct rte_memzone *rz;
-	uint32_t ring_size;
+	uint32_t ring_size, tlen;
 	uint16_t len;
 	int use_def_burst_func = 1;
 	uint64_t offloads;
@@ -1284,9 +1284,14 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	/* always reserve more for bulk alloc */
 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
 
+	/* allocate extra entries for SW split buffer */
+	tlen = ((rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0) ?
+		rxq->rx_free_thresh : 0;
+	tlen += len;
+
 	/* Allocate the software ring. */
 	rxq->sw_ring = rte_zmalloc_socket(NULL,
-				sizeof(struct ice_rx_entry) * len,
+				sizeof(struct ice_rx_entry) * tlen,
 				RTE_CACHE_LINE_SIZE,
 				socket_id);
 	if (!rxq->sw_ring) {
@@ -1295,6 +1300,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->sw_split_buf = (tlen == len) ? NULL : rxq->sw_ring + len;
+
 	ice_reset_rx_queue(rxq);
 	rxq->q_set = true;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1873,7 +1880,6 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 	uint64_t dma_addr;
 	int diag, diag_pay;
 	uint64_t pay_addr;
-	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
 
 	/* Allocate buffers in bulk */
 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
@@ -1888,7 +1894,7 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
-				(void *)mbufs_pay, rxq->rx_free_thresh);
+				(void *)rxq->sw_split_buf, rxq->rx_free_thresh);
 		if (unlikely(diag_pay != 0)) {
 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
 			return -ENOMEM;
@@ -1913,8 +1919,8 @@ ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
 			rxdp[i].read.hdr_addr = 0;
 			rxdp[i].read.pkt_addr = dma_addr;
 		} else {
-			mb->next = mbufs_pay[i];
-			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
+			mb->next = rxq->sw_split_buf[i].mbuf;
+			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb->next));
 			rxdp[i].read.hdr_addr = dma_addr;
 			rxdp[i].read.pkt_addr = pay_addr;
 		}
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f7276cfc9f..d0f0b6c1d2 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -139,6 +139,8 @@ struct ice_rx_queue {
 	uint32_t hw_time_high; /* high 32 bits of timestamp */
 	uint32_t hw_time_low; /* low 32 bits of timestamp */
 	uint64_t hw_time_update; /* SW time of HW record updating */
+	struct ice_rx_entry *sw_split_buf;
+	/* address of temp buffer for RX split mbufs */
 	struct rte_eth_rxseg_split rxseg[ICE_RX_MAX_NSEG];
 	uint32_t rxseg_nb;
 	bool ts_enable; /* if rxq timestamp is enabled */
--
2.34.1
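
For readers outside this driver, the allocation pattern used above boils down
to the short standalone sketch below. It is only an illustration, not the
DPDK API: the names my_entry, my_rxq, my_rxq_setup and my_rx_refill are
hypothetical, and plain calloc() stands in for rte_zmalloc_socket().

#include <stdlib.h>

struct my_entry { void *mbuf; };

struct my_rxq {
	struct my_entry *sw_ring;      /* ring entries plus scratch area */
	struct my_entry *sw_split_buf; /* scratch entries, NULL if split is off */
	unsigned int len;              /* number of real ring entries */
	unsigned int rx_free_thresh;   /* size of the scratch area */
};

/* Setup path: reserve the scratch entries once, right behind the ring,
 * instead of declaring a VLA on every refill call. */
int
my_rxq_setup(struct my_rxq *q, unsigned int len, unsigned int thresh, int split)
{
	unsigned int tlen = len + (split ? thresh : 0);

	q->sw_ring = calloc(tlen, sizeof(*q->sw_ring));
	if (q->sw_ring == NULL)
		return -1;

	q->len = len;
	q->rx_free_thresh = thresh;
	q->sw_split_buf = (tlen == len) ? NULL : q->sw_ring + len;
	return 0;
}

/* Fast path: reuse the pre-allocated scratch area. This is safe because
 * only one thread polls a given RX queue at a time, so the scratch
 * entries are never used concurrently. */
void
my_rx_refill(struct my_rxq *q, void **fresh_mbufs)
{
	unsigned int i;

	if (q->sw_split_buf == NULL)
		return;
	for (i = 0; i < q->rx_free_thresh; i++)
		q->sw_split_buf[i].mbuf = fresh_mbufs[i];
}

The single setup-time allocation is why the patch only grows the sw_ring
zmalloc and records sw_split_buf as a pointer into it; the RX refill code then
indexes that area exactly where it previously indexed the VLA.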