patches for DPDK stable branches
 help / color / mirror / Atom feed
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Cheng Jiang <cheng1.jiang@intel.com>, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,
	wenwux.ma@intel.com, yuanx.wang@intel.com,
	yvonnex.yang@intel.com, xingguang.he@intel.com, stable@dpdk.org
Subject: Re: [PATCH v2 1/2] vhost: fix descs count in async vhost packed ring
Date: Fri, 21 Oct 2022 10:16:03 +0200	[thread overview]
Message-ID: <1240d2b7-d07d-9861-248e-d7d915f9f89a@redhat.com> (raw)
In-Reply-To: <20221011030803.16746-2-cheng1.jiang@intel.com>



On 10/11/22 05:08, Cheng Jiang wrote:
> When vhost receive packets from the front-end using packed virtqueue, it

receives

> might use multiple descriptors for one packet, so we need calculate and

to calculate

> record the descriptor number for each packet to update available
> descriptor counter and used descriptor counter, and rollback when DMA
> ring is full.
> 
> Fixes: fe8477ebbd94 ("vhost: support async packed ring dequeue")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
>   lib/vhost/virtio_net.c | 24 +++++++++++++++---------
>   1 file changed, 15 insertions(+), 9 deletions(-)
> 
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 8f4d0f0502..457ac2e92a 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -3548,14 +3548,15 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
>   }
>   
>   static __rte_always_inline void
> -vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
> +vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
> +				uint16_t buf_id, uint16_t count)
>   {
>   	struct vhost_async *async = vq->async;
>   	uint16_t idx = async->buffer_idx_packed;
>   
>   	async->buffers_packed[idx].id = buf_id;
>   	async->buffers_packed[idx].len = 0;
> -	async->buffers_packed[idx].count = 1;
> +	async->buffers_packed[idx].count = count;
>   
>   	async->buffer_idx_packed++;
>   	if (async->buffer_idx_packed >= vq->size)
> @@ -3576,6 +3577,8 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
>   	uint16_t nr_vec = 0;
>   	uint32_t buf_len;
>   	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> +	struct vhost_async *async = vq->async;
> +	struct async_inflight_info *pkts_info = async->pkts_info;
>   	static bool allocerr_warned;
>   
>   	if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
> @@ -3604,8 +3607,12 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
>   		return -1;
>   	}
>   
> +	pkts_info[slot_idx].descs = desc_count;
> +
>   	/* update async shadow packed ring */
> -	vhost_async_shadow_dequeue_single_packed(vq, buf_id);
> +	vhost_async_shadow_dequeue_single_packed(vq, buf_id, desc_count);
> +
> +	vq_inc_last_avail_packed(vq, desc_count);
>   
>   	return err;
>   }
> @@ -3644,9 +3651,6 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		}
>   
>   		pkts_info[slot_idx].mbuf = pkt;
> -
> -		vq_inc_last_avail_packed(vq, 1);
> -
>   	}
>   
>   	n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
> @@ -3657,6 +3661,8 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   	pkt_err = pkt_idx - n_xfer;
>   
>   	if (unlikely(pkt_err)) {
> +		uint16_t descs_err = 0;
> +
>   		pkt_idx -= pkt_err;
>   
>   		/**
> @@ -3673,10 +3679,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>   		}
>   
>   		/* recover available ring */
> -		if (vq->last_avail_idx >= pkt_err) {
> -			vq->last_avail_idx -= pkt_err;
> +		if (vq->last_avail_idx >= descs_err) {
> +			vq->last_avail_idx -= descs_err;
>   		} else {
> -			vq->last_avail_idx += vq->size - pkt_err;
> +			vq->last_avail_idx += vq->size - descs_err;
>   			vq->avail_wrap_counter ^= 1;
>   		}
>   	}

If only the commit message typos need to be fixed, then maybe there is no
need to send a new version.

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


  reply	other threads:[~2022-10-21  8:16 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <20220822043126.19340-1-cheng1.jiang@intel.com>
     [not found] ` <20221011030803.16746-1-cheng1.jiang@intel.com>
2022-10-11  3:08   ` Cheng Jiang
2022-10-21  8:16     ` Maxime Coquelin [this message]
2022-10-24  1:41       ` Jiang, Cheng1
2022-10-24  8:42         ` Xia, Chenbo
2022-10-11  3:08   ` [PATCH v2 2/2] vhost: fix slot index calculation in async vhost Cheng Jiang
2022-10-13  9:40     ` Ling, WeiX
2022-10-21  8:17     ` Maxime Coquelin
2022-10-24  8:43     ` Xia, Chenbo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1240d2b7-d07d-9861-248e-d7d915f9f89a@redhat.com \
    --to=maxime.coquelin@redhat.com \
    --cc=chenbo.xia@intel.com \
    --cc=cheng1.jiang@intel.com \
    --cc=dev@dpdk.org \
    --cc=jiayu.hu@intel.com \
    --cc=stable@dpdk.org \
    --cc=wenwux.ma@intel.com \
    --cc=xingguang.he@intel.com \
    --cc=xuan.ding@intel.com \
    --cc=yuanx.wang@intel.com \
    --cc=yvonnex.yang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).