From: longli@linuxonhyperv.com
To: Ferruh Yigit
Cc: dev@dpdk.org, Ajay Sharma, Long Li, stable@dpdk.org
Subject: [PATCH 1/2] net/mana: avoid unnecessary assignments in data path
Date: Fri, 17 Mar 2023 16:32:43 -0700
Message-Id: <1679095964-18532-1-git-send-email-longli@linuxonhyperv.com>

From: Long Li

Unnecessary assignments involve memset and waste CPU cycles. Remove
them to reduce CPU usage.

Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Cc: stable@dpdk.org

Signed-off-by: Long Li
---
 drivers/net/mana/gdma.c | 11 ++---------
 drivers/net/mana/mana.h |  2 +-
 drivers/net/mana/rx.c   |  9 ++++-----
 drivers/net/mana/tx.c   | 17 ++++++++++-------
 4 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 3d4039014f..0922463ef9 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -123,7 +123,7 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
 int
 gdma_post_work_request(struct mana_gdma_queue *queue,
 		       struct gdma_work_request *work_req,
-		       struct gdma_posted_wqe_info *wqe_info)
+		       uint32_t *wqe_size_in_bu)
 {
 	uint32_t client_oob_size =
 		work_req->inline_oob_size_in_bytes >
@@ -149,14 +149,7 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
 	DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
 		client_oob_size, sgl_data_size, wqe_size);
 
-	if (wqe_info) {
-		wqe_info->wqe_index =
-			((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
-			 (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-		wqe_info->unmasked_queue_offset = queue->head;
-		wqe_info->wqe_size_in_bu =
-			wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-	}
+	*wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
 
 	wq_buffer_pointer = gdma_get_wqe_pointer(queue);
 	wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 4a05238a96..d4a1ba8492 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -459,7 +459,7 @@ int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm);
 
 int gdma_post_work_request(struct mana_gdma_queue *queue,
 			   struct gdma_work_request *work_req,
-			   struct gdma_posted_wqe_info *wqe_info);
+			   uint32_t *wqe_size_in_bu);
 uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);
 
 uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 55247889c1..bdbd11c5f9 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -52,8 +52,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 {
 	struct rte_mbuf *mbuf = NULL;
 	struct gdma_sgl_element sgl[1];
-	struct gdma_work_request request = {0};
-	struct gdma_posted_wqe_info wqe_info = {0};
+	struct gdma_work_request request;
+	uint32_t wqe_size_in_bu;
 	struct mana_priv *priv = rxq->priv;
 	int ret;
 	struct mana_mr_cache *mr;
@@ -72,7 +72,6 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	}
 
 	request.gdma_header.struct_size = sizeof(request);
-	wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 	sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
 	sgl[0].memory_key = mr->lkey;
@@ -87,14 +86,14 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	request.flags = 0;
 	request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
-	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);
+	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu);
 	if (!ret) {
 		struct mana_rxq_desc *desc =
 			&rxq->desc_ring[rxq->desc_ring_head];
 
 		/* update queue for tracking pending packets */
 		desc->pkt = mbuf;
-		desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+		desc->wqe_size_in_bu = wqe_size_in_bu;
 		rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
 	} else {
 		DRV_LOG(ERR, "failed to post recv ret %d", ret);
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 300bf27cc1..a7ee47c582 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -208,8 +208,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) {
 		struct rte_mbuf *m_pkt = tx_pkts[pkt_idx];
 		struct rte_mbuf *m_seg = m_pkt;
-		struct transmit_oob_v2 tx_oob = {0};
-		struct one_sgl sgl = {0};
+		struct transmit_oob_v2 tx_oob;
+		struct one_sgl sgl;
 		uint16_t seg_idx;
 
 		/* Drop the packet if it exceeds max segments */
@@ -263,6 +263,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_oob.short_oob.tx_compute_TCP_checksum = 1;
 			tx_oob.short_oob.tx_transport_header_offset =
 				m_pkt->l2_len + m_pkt->l3_len;
+		} else {
+			tx_oob.short_oob.tx_compute_TCP_checksum = 0;
 		}
 
 		if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
@@ -301,6 +303,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 
 			tx_oob.short_oob.tx_compute_UDP_checksum = 1;
+		} else {
+			tx_oob.short_oob.tx_compute_UDP_checksum = 0;
 		}
 
 		tx_oob.short_oob.suppress_tx_CQE_generation = 0;
@@ -355,11 +359,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (seg_idx != m_pkt->nb_segs)
 			continue;
 
-		struct gdma_work_request work_req = {0};
-		struct gdma_posted_wqe_info wqe_info = {0};
+		struct gdma_work_request work_req;
+		uint32_t wqe_size_in_bu;
 
 		work_req.gdma_header.struct_size = sizeof(work_req);
-		wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 		work_req.sgl = sgl.gdma_sgl;
 		work_req.num_sgl_elements = m_pkt->nb_segs;
@@ -370,14 +373,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
 		ret = gdma_post_work_request(&txq->gdma_sq, &work_req,
-					     &wqe_info);
+					     &wqe_size_in_bu);
 		if (!ret) {
 			struct mana_txq_desc *desc =
 				&txq->desc_ring[txq->desc_ring_head];
 
 			/* Update queue for tracking pending requests */
 			desc->pkt = m_pkt;
-			desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+			desc->wqe_size_in_bu = wqe_size_in_bu;
 			txq->desc_ring_head = (txq->desc_ring_head + 1) % txq->num_desc;
-- 
2.32.0
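
For context, below is a minimal stand-alone sketch of the cost this patch avoids. A "= {0}" initializer on a stack structure typically compiles to a memset of the whole object, so on a per-packet path it is cheaper to leave the structure uninitialized and assign only the fields that are actually consumed (which is why the tx.c hunks add else branches so the checksum bits are always written explicitly). The struct and function names below are hypothetical, for illustration only; they are not the driver's real types.

#include <stdint.h>

/* Hypothetical stand-in for a per-packet out-of-band descriptor. */
struct demo_oob {
	uint32_t compute_checksum;
	uint32_t transport_header_offset;
	uint8_t reserved[56];	/* padding that makes the implicit memset costly */
};

/* Before: the whole structure is zeroed on every packet. */
static void demo_per_packet_before(uint32_t hdr_off, int need_csum)
{
	struct demo_oob oob = {0};	/* compiler typically emits a memset here */

	if (need_csum) {
		oob.compute_checksum = 1;
		oob.transport_header_offset = hdr_off;
	}
	(void)oob;	/* handed to hardware in the real driver */
}

/* After: no implicit memset; every consumed field is assigned on both paths. */
static void demo_per_packet_after(uint32_t hdr_off, int need_csum)
{
	struct demo_oob oob;	/* intentionally left uninitialized */

	if (need_csum) {
		oob.compute_checksum = 1;
		oob.transport_header_offset = hdr_off;
	} else {
		oob.compute_checksum = 0;	/* mirrors the added else branches */
		oob.transport_header_offset = 0;
	}
	(void)oob;
}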