From: longli@linuxonhyperv.com
To: Ferruh Yigit
Cc: dev@dpdk.org, Ajay Sharma, Long Li, stable@dpdk.org
Subject: [PATCH] net/mana: suppress TX CQE generation whenever possible
Date: Thu, 27 Apr 2023 20:36:49 -0700
Message-Id: <1682653009-19988-1-git-send-email-longli@linuxonhyperv.com>

From: Long Li

When sending TX packets, we don't need a completion for every packet
sent. If packets are sent in a series, the completion of the last
packet can be used to indicate completion of all prior packets.
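In other words, a CQE is requested only for the last WQE of a burst (or
for a WQE that would nearly fill the descriptor ring), and that single
completion retires all earlier, suppressed descriptors. Below is a
minimal, self-contained sketch of that decision; the helper name and
parameters are illustrative only and are not part of the driver:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only: decide whether the CQE for the WQE being
 * posted for packet `pkt_idx` (out of `nb_pkts`) can be suppressed,
 * given `in_flight` descriptors already pending on a ring of
 * `ring_size` entries. The last packet of the burst, or a WQE that
 * leaves no room for another one, keeps its CQE so a completion still
 * arrives to retire all earlier suppressed WQEs.
 */
static inline bool
tx_cqe_can_be_suppressed(uint32_t in_flight, uint32_t ring_size,
                         uint16_t pkt_idx, uint16_t nb_pkts)
{
        return in_flight + 1 < ring_size && pkt_idx + 1 < nb_pkts;
}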
Signed-off-by: Long Li
Cc: stable@dpdk.org
---
 drivers/net/mana/mana.h |  3 ++-
 drivers/net/mana/tx.c   | 33 ++++++++++++++++++++++++++++++---
 2 files changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index c79d39daa2..f280d66f6e 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -353,6 +353,7 @@ struct mana_priv {
 struct mana_txq_desc {
         struct rte_mbuf *pkt;
         uint32_t wqe_size_in_bu;
+        bool suppress_tx_cqe;
 };
 
 struct mana_rxq_desc {
@@ -401,7 +402,7 @@ struct mana_txq {
         /* desc_ring_head is where we put pending requests to ring,
          * completion pull off desc_ring_tail
          */
-        uint32_t desc_ring_head, desc_ring_tail;
+        uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;
 
         struct mana_mr_btree mr_btree;
         struct mana_stats stats;
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index ee0319c71d..c8d3911f85 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -43,9 +43,11 @@ mana_stop_tx_queues(struct rte_eth_dev *dev)
 
                         txq->desc_ring_tail =
                                 (txq->desc_ring_tail + 1) % txq->num_desc;
+                        txq->desc_ring_len--;
                 }
                 txq->desc_ring_head = 0;
                 txq->desc_ring_tail = 0;
+                txq->desc_ring_len = 0;
 
                 memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
                 memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
@@ -173,13 +175,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         int ret;
         void *db_page;
         uint16_t pkt_sent = 0;
-        uint32_t num_comp;
+        uint32_t num_comp, i;
 
         /* Process send completions from GDMA */
         num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
                         txq->gdma_comp_buf, txq->num_desc);
 
-        for (uint32_t i = 0; i < num_comp; i++) {
+        i = 0;
+        while (i < num_comp) {
                 struct mana_txq_desc *desc =
                         &txq->desc_ring[txq->desc_ring_tail];
                 struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
@@ -204,7 +207,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 desc->pkt = NULL;
                 txq->desc_ring_tail =
                         (txq->desc_ring_tail + 1) % txq->num_desc;
+                txq->desc_ring_len--;
                 txq->gdma_sq.tail += desc->wqe_size_in_bu;
+
+                /* If TX CQE suppression is used, don't read more CQE but move
+                 * on to the next packet
+                 */
+                if (desc->suppress_tx_cqe)
+                        continue;
+
+                i++;
         }
 
         /* Post send requests to GDMA */
@@ -215,6 +227,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 struct one_sgl sgl;
                 uint16_t seg_idx;
 
+                if (txq->desc_ring_len >= txq->num_desc)
+                        break;
+
                 /* Drop the packet if it exceeds max segments */
                 if (m_pkt->nb_segs > priv->max_send_sge) {
                         DRV_LOG(ERR, "send packet segments %d exceeding max",
@@ -310,7 +325,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         tx_oob.short_oob.tx_compute_UDP_checksum = 0;
                 }
 
-                tx_oob.short_oob.suppress_tx_CQE_generation = 0;
                 tx_oob.short_oob.VCQ_number = txq->gdma_cq.id;
 
                 tx_oob.short_oob.VSQ_frame_num =
@@ -362,6 +376,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 if (seg_idx != m_pkt->nb_segs)
                         continue;
 
+                /* If we can at least queue post two WQEs and there are at
+                 * least two packets to send, use TX CQE suppression for the
+                 * current WQE
+                 */
+                if (txq->desc_ring_len + 1 < txq->num_desc &&
+                    pkt_idx + 1 < nb_pkts)
+                        tx_oob.short_oob.suppress_tx_CQE_generation = 1;
+                else
+                        tx_oob.short_oob.suppress_tx_CQE_generation = 0;
+
                 struct gdma_work_request work_req;
                 uint32_t wqe_size_in_bu;
 
@@ -384,8 +408,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 /* Update queue for tracking pending requests */
                 desc->pkt = m_pkt;
                 desc->wqe_size_in_bu = wqe_size_in_bu;
+                desc->suppress_tx_cqe =
+                        tx_oob.short_oob.suppress_tx_CQE_generation;
 
                 txq->desc_ring_head =
                         (txq->desc_ring_head + 1) % txq->num_desc;
+                txq->desc_ring_len++;
 
                 pkt_sent++;
-- 
2.32.0