patches for DPDK stable branches
From: Ferruh Yigit <ferruh.yigit@amd.com>
To: longli@linuxonhyperv.com,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Luca Boccassi <bluca@debian.org>,
	Kevin Traynor <ktraynor@redhat.com>
Cc: dev@dpdk.org, Ajay Sharma <sharmaajay@microsoft.com>,
	Long Li <longli@microsoft.com>,
	stable@dpdk.org
Subject: Re: [PATCH v2] net/mana: suppress TX CQE generation whenever possible
Date: Fri, 19 May 2023 17:34:05 +0100
Message-ID: <e5555440-7064-831d-9fc3-e93ffef239b8@amd.com>
In-Reply-To: <1683336767-19526-1-git-send-email-longli@linuxonhyperv.com>

On 5/6/2023 2:32 AM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
> 
> When sending TX packets, we don't need a completion for every packet sent.
> If packets are sent in a series, the completion of the last packet can be
> used to indicate completion of all prior packets.
> 
> Cc: stable@dpdk.org
>

Hi Long,

Patch looks good to me, but I am not sure about the backport request.
This patch is an optimisation rather than a fix, and we tend to take
only fixes into the stable trees.

LTS maintainers cc'ed for comment.
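
For reviewers not familiar with the mana driver, the posting-side idea
from the commit message reduces to the sketch below (illustrative
names, not the actual driver API): the hardware completes WQEs in
order, so requesting a CQE only for the last WQE of a burst is enough
to retire every earlier one.

  #include <stdbool.h>
  #include <stdint.h>

  /* Stand-in for the real WQE-posting path. */
  static void post_wqe(void *pkt, bool suppress_cqe)
  {
  	(void)pkt;
  	(void)suppress_cqe;
  	/* the real driver builds the OOB and queues the work request */
  }

  static void send_burst(void **pkts, uint16_t nb_pkts)
  {
  	for (uint16_t i = 0; i < nb_pkts; i++)
  		/* only the final WQE of the burst asks for a CQE */
  		post_wqe(pkts[i], i + 1 < nb_pkts);
  }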


> Signed-off-by: Long Li <longli@microsoft.com>
> ---
> Change log
> v2: rebased to dpdk-next-net
> 
>  drivers/net/mana/mana.h |  3 ++-
>  drivers/net/mana/tx.c   | 33 ++++++++++++++++++++++++++++++---
>  2 files changed, 32 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
> index b653e1dd82..be88537f1a 100644
> --- a/drivers/net/mana/mana.h
> +++ b/drivers/net/mana/mana.h
> @@ -353,6 +353,7 @@ struct mana_priv {
>  struct mana_txq_desc {
>  	struct rte_mbuf *pkt;
>  	uint32_t wqe_size_in_bu;
> +	bool suppress_tx_cqe;
>  };
>  
>  struct mana_rxq_desc {
> @@ -401,7 +402,7 @@ struct mana_txq {
>  	/* desc_ring_head is where we put pending requests to ring,
>  	 * completion pull off desc_ring_tail
>  	 */
> -	uint32_t desc_ring_head, desc_ring_tail;
> +	uint32_t desc_ring_head, desc_ring_tail, desc_ring_len;
>  
>  	struct mana_mr_btree mr_btree;
>  	struct mana_stats stats;
> diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
> index 7f570181ad..5947efbe8d 100644
> --- a/drivers/net/mana/tx.c
> +++ b/drivers/net/mana/tx.c
> @@ -43,9 +43,11 @@ mana_stop_tx_queues(struct rte_eth_dev *dev)
>  
>  			txq->desc_ring_tail =
>  				(txq->desc_ring_tail + 1) % txq->num_desc;
> +			txq->desc_ring_len--;
>  		}
>  		txq->desc_ring_head = 0;
>  		txq->desc_ring_tail = 0;
> +		txq->desc_ring_len = 0;
>  
>  		memset(&txq->gdma_sq, 0, sizeof(txq->gdma_sq));
>  		memset(&txq->gdma_cq, 0, sizeof(txq->gdma_cq));
> @@ -173,13 +175,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	int ret;
>  	void *db_page;
>  	uint16_t pkt_sent = 0;
> -	uint32_t num_comp;
> +	uint32_t num_comp, i;
>  
>  	/* Process send completions from GDMA */
>  	num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
>  			txq->gdma_comp_buf, txq->num_desc);
>  
> -	for (uint32_t i = 0; i < num_comp; i++) {
> +	i = 0;
> +	while (i < num_comp) {
>  		struct mana_txq_desc *desc =
>  			&txq->desc_ring[txq->desc_ring_tail];
>  		struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
> @@ -204,7 +207,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  
>  		desc->pkt = NULL;
>  		txq->desc_ring_tail = (txq->desc_ring_tail + 1) % txq->num_desc;
> +		txq->desc_ring_len--;
>  		txq->gdma_sq.tail += desc->wqe_size_in_bu;
> +
> +		/* If TX CQE suppression was used, no CQE was generated for
> +		 * this WQE; move to the next descriptor without consuming a CQE
> +		 */
> +		if (desc->suppress_tx_cqe)
> +			continue;
> +
> +		i++;
>  	}
>  
>  	/* Post send requests to GDMA */
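
The accounting change above is the subtle part: 'num_comp' counts
polled CQEs, but with suppression each CQE can retire several
descriptors, so the ring tail and the CQE index advance at different
rates. A quick standalone model of the reworked loop (hypothetical
self-check, not the driver code):

  #include <assert.h>
  #include <stdbool.h>

  int main(void)
  {
  	/* true = WQE was posted with its CQE suppressed */
  	bool suppress[] = { true, true, false, true, false };
  	unsigned int num_comp = 2;	/* CQEs the hardware produced */
  	unsigned int tail = 0, i = 0;

  	while (i < num_comp) {
  		bool s = suppress[tail++];	/* retire one descriptor */
  		if (s)
  			continue;	/* no CQE was generated for it */
  		i++;	/* consume one polled CQE */
  	}
  	assert(tail == 5);	/* two CQEs retired all five WQEs */
  	return 0;
  }
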
> @@ -215,6 +227,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		struct one_sgl sgl;
>  		uint16_t seg_idx;
>  
> +		if (txq->desc_ring_len >= txq->num_desc)
> +			break;
> +
>  		/* Drop the packet if it exceeds max segments */
>  		if (m_pkt->nb_segs > priv->max_send_sge) {
>  			DP_LOG(ERR, "send packet segments %d exceeding max",
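
Also worth spelling out why the new 'desc_ring_len' is needed at all:
with only head/tail indices, head == tail is ambiguous between an
empty ring and a full one, so full occupancy could not be used. An
explicit occupancy counter removes the ambiguity. Minimal sketch
(illustrative, not the driver code):

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define NUM_DESC 4

  struct ring {
  	uint32_t head, tail, len;
  };

  /* Post one descriptor; mirrors the full-ring check the patch adds. */
  static bool ring_post(struct ring *r)
  {
  	if (r->len >= NUM_DESC)
  		return false;	/* ring full, stop posting */
  	r->head = (r->head + 1) % NUM_DESC;
  	r->len++;
  	return true;
  }

  /* Retire one descriptor on completion. */
  static void ring_complete(struct ring *r)
  {
  	assert(r->len > 0);	/* must not underflow */
  	r->tail = (r->tail + 1) % NUM_DESC;
  	r->len--;
  }
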
> @@ -310,7 +325,6 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			tx_oob.short_oob.tx_compute_UDP_checksum = 0;
>  		}
>  
> -		tx_oob.short_oob.suppress_tx_CQE_generation = 0;
>  		tx_oob.short_oob.VCQ_number = txq->gdma_cq.id;
>  
>  		tx_oob.short_oob.VSQ_frame_num =
> @@ -362,6 +376,16 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		if (seg_idx != m_pkt->nb_segs)
>  			continue;
>  
> +		/* If the ring has room for at least two more WQEs and there
> +		 * are at least two packets left to send, use TX CQE
> +		 * suppression for the current WQE
> +		 */
> +		if (txq->desc_ring_len + 1 < txq->num_desc &&
> +		    pkt_idx + 1 < nb_pkts)
> +			tx_oob.short_oob.suppress_tx_CQE_generation = 1;
> +		else
> +			tx_oob.short_oob.suppress_tx_CQE_generation = 0;
> +
>  		struct gdma_work_request work_req;
>  		uint32_t wqe_size_in_bu;
>  
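
These two conditions are what keep the scheme safe: suppression is
used only while the ring can still take another WQE and another
packet follows, so the last WQE of every burst requests a CQE and the
suppressed descriptors queued before it are guaranteed to be retired.
Written as a standalone predicate (illustrative names):

  #include <stdbool.h>
  #include <stdint.h>

  /* True when it is safe to suppress the CQE for the current WQE. */
  static bool suppress_cqe(uint32_t ring_len, uint32_t num_desc,
  			 uint16_t pkt_idx, uint16_t nb_pkts)
  {
  	return ring_len + 1 < num_desc && pkt_idx + 1 < nb_pkts;
  }
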
> @@ -384,8 +408,11 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			/* Update queue for tracking pending requests */
>  			desc->pkt = m_pkt;
>  			desc->wqe_size_in_bu = wqe_size_in_bu;
> +			desc->suppress_tx_cqe =
> +				tx_oob.short_oob.suppress_tx_CQE_generation;
>  			txq->desc_ring_head =
>  				(txq->desc_ring_head + 1) % txq->num_desc;
> +			txq->desc_ring_len++;
>  
>  			pkt_sent++;
>  


