patches for DPDK stable branches
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Gaoxiang Liu <gaoxiangliu0@163.com>, chenbo.xia@intel.com
Cc: dev@dpdk.org, liugaoxiang@huawei.com, stable@dpdk.org
Subject: Re: [dpdk-stable] [PATCH] vhost: merge repeated loop in vhost Tx
Date: Thu, 23 Sep 2021 13:30:08 +0200	[thread overview]
Message-ID: <bab36866-e064-6df1-aa2b-defa9f04defa@redhat.com> (raw)
In-Reply-To: <20210910090530.893-1-gaoxiangliu0@163.com>



On 9/10/21 11:05, Gaoxiang Liu wrote:
> To improve the performance of vhost Tx, merge the repeated loops in
> eth_vhost_tx: move the VLAN insertion from eth_vhost_tx into
> virtio_dev_rx_packed and virtio_dev_rx_split so the burst is only
> walked once.
> 
> Fixes: f63d356ee993 ("net/vhost: insert/strip VLAN header in software")
> Cc: stable@dpdk.org

This kind of performance optimization should not be backported to stable
branches.

> 
> Signed-off-by: Gaoxiang Liu <gaoxiangliu0@163.com>
> ---
>   drivers/net/vhost/rte_eth_vhost.c | 25 ++++---------------------
>   lib/vhost/virtio_net.c            | 21 +++++++++++++++++++++
>   2 files changed, 25 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index a202931e9a..ae20550976 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -428,7 +428,6 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>   {
>   	struct vhost_queue *r = q;
>   	uint16_t i, nb_tx = 0;
> -	uint16_t nb_send = 0;
>   	uint64_t nb_bytes = 0;
>   	uint64_t nb_missed = 0;
>   
> @@ -440,33 +439,17 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>   	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>   		goto out;
>   
> -	for (i = 0; i < nb_bufs; i++) {
> -		struct rte_mbuf *m = bufs[i];
> -
> -		/* Do VLAN tag insertion */
> -		if (m->ol_flags & PKT_TX_VLAN_PKT) {
> -			int error = rte_vlan_insert(&m);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(m);
> -				continue;
> -			}
> -		}
> -
> -		bufs[nb_send] = m;
> -		++nb_send;
> -	}
> -
>   	/* Enqueue packets to guest RX queue */
> -	while (nb_send) {
> +	while (nb_bufs) {
>   		uint16_t nb_pkts;
> -		uint16_t num = (uint16_t)RTE_MIN(nb_send,
> +		uint16_t num = (uint16_t)RTE_MIN(nb_bufs,
>   						 VHOST_MAX_PKT_BURST);
>   
>   		nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
>   						  &bufs[nb_tx], num);
>   
>   		nb_tx += nb_pkts;
> -		nb_send -= nb_pkts;
> +		nb_bufs -= nb_pkts;
>   		if (nb_pkts < num)
>   			break;
>   	}
> @@ -474,7 +457,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>   	for (i = 0; likely(i < nb_tx); i++)
>   		nb_bytes += bufs[i]->pkt_len;
>   
> -	nb_missed = nb_bufs - nb_tx;
> +	nb_missed = nb_bufs;
>   
>   	r->stats.pkts += nb_tx;
>   	r->stats.bytes += nb_bytes;
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8549afbbe1..2057f4e7fe 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1218,6 +1218,16 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
>   		uint16_t nr_vec = 0;
>   
> +		/* Do VLAN tag insertion */
> +		if (pkts[pkt_idx]->ol_flags & PKT_TX_VLAN_PKT) {
> +			int error = rte_vlan_insert(&pkts[pkt_idx]);
> +			if (unlikely(error)) {
> +				rte_pktmbuf_free(pkts[pkt_idx]);
> +				pkts[pkt_idx] = NULL;
> +				continue;
> +			}
> +		}
> +
>   		if (unlikely(reserve_avail_buf_split(dev, vq,
>   						pkt_len, buf_vec, &num_buffers,
>   						avail_head, &nr_vec) < 0)) {
> @@ -1490,6 +1500,17 @@ virtio_dev_rx_packed(struct virtio_net *dev,
>   	do {
>   		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>   
> +		/* Do VLAN tag insertion */
> +		if (pkts[pkt_idx]->ol_flags & PKT_TX_VLAN_PKT) {
> +			int error = rte_vlan_insert(&pkts[pkt_idx]);
> +			if (unlikely(error)) {
> +				rte_pktmbuf_free(pkts[pkt_idx]);
> +				pkts[pkt_idx] = NULL;
> +				pkt_idx++;
> +				continue;
> +			}
> +		}
> +
>   		if (count - pkt_idx >= PACKED_BATCH_SIZE) {
>   			if (!virtio_dev_rx_sync_batch_packed(dev, vq,
>   							&pkts[pkt_idx])) {
> 

It would make sense to do the VLAN insertion in virtio_enqueue_offload()
instead; that would avoid duplicating the same block in the split and
packed paths.
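
A rough sketch of the shape that could take (illustrative only: the
helper name and its placement are assumptions, not the actual DPDK
code, and rte_vlan_insert() may replace the mbuf, so whichever function
ends up doing the insertion needs a pointer to the caller's mbuf slot):

#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_ether.h>

/* Hypothetical helper shared by the split and packed enqueue paths
 * (or folded into virtio_enqueue_offload()). On failure the mbuf is
 * freed and the slot cleared so the caller can simply skip it.
 */
static __rte_always_inline int
vhost_vlan_insert(struct rte_mbuf **pkt)
{
	/* Software VLAN insertion, moved here from eth_vhost_tx() */
	if ((*pkt)->ol_flags & PKT_TX_VLAN_PKT) {
		if (unlikely(rte_vlan_insert(pkt) != 0)) {
			rte_pktmbuf_free(*pkt);
			*pkt = NULL;
			return -1;
		}
	}
	return 0;
}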

Regards,
Maxime



Thread overview: 5+ messages
2021-09-09 14:00 Gaoxiang Liu
2021-09-10  2:11 ` Gaoxiang Liu
2021-09-10  2:28   ` Gaoxiang Liu
2021-09-10  9:05   ` Gaoxiang Liu
2021-09-23 11:30     ` Maxime Coquelin [this message]
