DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH v1] vhost: support async copy free segmentations
@ 2020-07-15  7:46 patrick.fu
  2020-07-15 11:15 ` [dpdk-dev] [PATCH v2] " patrick.fu
  2020-07-21  5:47 ` [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets patrick.fu
  0 siblings, 2 replies; 10+ messages in thread
From: patrick.fu @ 2020-07-15  7:46 UTC (permalink / raw)
  To: dev, maxime.coquelin, chenbo.xia; +Cc: Patrick Fu

From: Patrick Fu <patrick.fu@intel.com>

Vhost async enqueue assumes that all async copies should break at packet
boundary. i.e. if a packet is split into multiple copy segments, the
async engine should always report copy completion when entire packet is
finished. This patch removes the assumption.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")

Signed-off-by: Patrick Fu <patrick.fu@intel.com>
---
 lib/librte_vhost/vhost.h      |  3 +++
 lib/librte_vhost/virtio_net.c | 12 ++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 8c01cee42..0f7212f88 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -46,6 +46,8 @@
 
 #define MAX_PKT_BURST 32
 
+#define ASYNC_MAX_POLL_SEG 255
+
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
 #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
 
@@ -225,6 +227,7 @@ struct vhost_virtqueue {
 	uint64_t	*async_pending_info;
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
+	uint16_t	async_last_seg_n;
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1d0be3dd4..c6fa33f37 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1652,12 +1652,14 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	n_pkts_cpl =
-		vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
+	n_pkts_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+		vq->async_last_seg_n;
 
 	rte_smp_wmb();
 
-	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
+	while (likely((n_pkts_put < count) &&
+		(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx))) {
 		uint64_t info = async_pending_info[
 			(start_idx + n_pkts_put) & (vq_size - 1)];
 		uint64_t n_segs;
@@ -1666,7 +1668,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
 
 		if (n_segs) {
-			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
+			if (unlikely(n_pkts_cpl < n_segs)) {
 				n_pkts_put--;
 				n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
 				if (n_pkts_cpl) {
@@ -1684,6 +1686,8 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		}
 	}
 
+	vq->async_last_seg_n = n_pkts_cpl;
+
 	if (n_pkts_put) {
 		vq->async_pkts_inflight_n -= n_pkts_put;
 		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-- 
2.18.4


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-15  7:46 [dpdk-dev] [PATCH v1] vhost: support async copy free segmentations patrick.fu
@ 2020-07-15 11:15 ` patrick.fu
  2020-07-17  3:21   ` Xia, Chenbo
  2020-07-20 14:58   ` Maxime Coquelin
  2020-07-21  5:47 ` [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets patrick.fu
  1 sibling, 2 replies; 10+ messages in thread
From: patrick.fu @ 2020-07-15 11:15 UTC (permalink / raw)
  To: dev, maxime.coquelin, chenbo.xia; +Cc: patrick.fu, yinan.wang

From: Patrick Fu <patrick.fu@intel.com>

Vhost async enqueue assumes that all async copies should break at packet
boundary. i.e. if a packet is split into multiple copy segments, the
async engine should always report copy completion when entire packet is
finished. This patch removes the assumption.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")

Signed-off-by: Patrick Fu <patrick.fu@intel.com>
---
v2:
 - fix an issue that can stall the async poll when the packet buffer is full
 - rename a local variable to better reflect its usage

 lib/librte_vhost/vhost.h      |  3 +++
 lib/librte_vhost/virtio_net.c | 17 ++++++++++++-----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 8c01cee42..0f7212f88 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -46,6 +46,8 @@
 
 #define MAX_PKT_BURST 32
 
+#define ASYNC_MAX_POLL_SEG 255
+
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
 #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
 
@@ -225,6 +227,7 @@ struct vhost_virtqueue {
 	uint64_t	*async_pending_info;
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
+	uint16_t	async_last_seg_n;
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1d0be3dd4..17808ab29 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1633,6 +1633,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	struct vhost_virtqueue *vq;
 	uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
+	uint16_t n_inflight;
 	uint64_t *async_pending_info;
 
 	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
@@ -1646,28 +1647,32 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 
 	rte_spinlock_lock(&vq->access_lock);
 
+	n_inflight = vq->async_pkts_inflight_n;
 	pkts_idx = vq->async_pkts_idx;
 	async_pending_info = vq->async_pending_info;
 	vq_size = vq->size;
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	n_pkts_cpl =
-		vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
+	n_pkts_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+		vq->async_last_seg_n;
 
 	rte_smp_wmb();
 
-	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
+	while (likely((n_pkts_put < count) && n_inflight)) {
 		uint64_t info = async_pending_info[
 			(start_idx + n_pkts_put) & (vq_size - 1)];
 		uint64_t n_segs;
 		n_pkts_put++;
+		n_inflight--;
 		n_descs += info & ASYNC_PENDING_INFO_N_MSK;
 		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
 
 		if (n_segs) {
-			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
+			if (unlikely(n_pkts_cpl < n_segs)) {
 				n_pkts_put--;
+				n_inflight++;
 				n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
 				if (n_pkts_cpl) {
 					async_pending_info[
@@ -1684,8 +1689,10 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		}
 	}
 
+	vq->async_last_seg_n = n_pkts_cpl;
+
 	if (n_pkts_put) {
-		vq->async_pkts_inflight_n -= n_pkts_put;
+		vq->async_pkts_inflight_n = n_inflight;
 		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
 
 		vhost_vring_call_split(dev, vq);
-- 
2.18.4


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-15 11:15 ` [dpdk-dev] [PATCH v2] " patrick.fu
@ 2020-07-17  3:21   ` Xia, Chenbo
  2020-07-17 11:52     ` Ferruh Yigit
  2020-07-20 14:58   ` Maxime Coquelin
  1 sibling, 1 reply; 10+ messages in thread
From: Xia, Chenbo @ 2020-07-17  3:21 UTC (permalink / raw)
  To: Fu, Patrick, dev, maxime.coquelin; +Cc: Wang, Yinan


> -----Original Message-----
> From: Fu, Patrick <patrick.fu@intel.com>
> Sent: Wednesday, July 15, 2020 7:15 PM
> To: dev@dpdk.org; maxime.coquelin@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: Fu, Patrick <patrick.fu@intel.com>; Wang, Yinan <yinan.wang@intel.com>
> Subject: [PATCH v2] vhost: support async copy free segmentations
> 
> From: Patrick Fu <patrick.fu@intel.com>
> 
> Vhost async enqueue assumes that all async copies should break at packet
> boundary. i.e. if a packet is split into multiple copy segments, the async engine
> should always report copy completion when entire packet is finished. This patch
> removes the assumption.
> 
> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
> 
> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> ---
> v2:
>  - fix an issue that can stall the async poll when the packet buffer is full
>  - rename a local variable to better reflect its usage
> 
>  lib/librte_vhost/vhost.h      |  3 +++
>  lib/librte_vhost/virtio_net.c | 17 ++++++++++++-----
>  2 files changed, 15 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index
> 8c01cee42..0f7212f88 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -46,6 +46,8 @@
> 
>  #define MAX_PKT_BURST 32
> 
> +#define ASYNC_MAX_POLL_SEG 255
> +
>  #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)  #define
> VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
> 
> @@ -225,6 +227,7 @@ struct vhost_virtqueue {
>  	uint64_t	*async_pending_info;
>  	uint16_t	async_pkts_idx;
>  	uint16_t	async_pkts_inflight_n;
> +	uint16_t	async_last_seg_n;
> 
>  	/* vq async features */
>  	bool		async_inorder;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index
> 1d0be3dd4..17808ab29 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1633,6 +1633,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
>  	struct vhost_virtqueue *vq;
>  	uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
>  	uint16_t start_idx, pkts_idx, vq_size;
> +	uint16_t n_inflight;
>  	uint64_t *async_pending_info;
> 
>  	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__); @@ -
> 1646,28 +1647,32 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
> 
>  	rte_spinlock_lock(&vq->access_lock);
> 
> +	n_inflight = vq->async_pkts_inflight_n;
>  	pkts_idx = vq->async_pkts_idx;
>  	async_pending_info = vq->async_pending_info;
>  	vq_size = vq->size;
>  	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
>  		vq_size, vq->async_pkts_inflight_n);
> 
> -	n_pkts_cpl =
> -		vq->async_ops.check_completed_copies(vid, queue_id, 0,
> count);
> +	n_pkts_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
> +		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
> +		vq->async_last_seg_n;
> 
>  	rte_smp_wmb();
> 
> -	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
> +	while (likely((n_pkts_put < count) && n_inflight)) {
>  		uint64_t info = async_pending_info[
>  			(start_idx + n_pkts_put) & (vq_size - 1)];
>  		uint64_t n_segs;
>  		n_pkts_put++;
> +		n_inflight--;
>  		n_descs += info & ASYNC_PENDING_INFO_N_MSK;
>  		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
> 
>  		if (n_segs) {
> -			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
> +			if (unlikely(n_pkts_cpl < n_segs)) {
>  				n_pkts_put--;
> +				n_inflight++;
>  				n_descs -= info &
> ASYNC_PENDING_INFO_N_MSK;
>  				if (n_pkts_cpl) {
>  					async_pending_info[
> @@ -1684,8 +1689,10 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid,
> uint16_t queue_id,
>  		}
>  	}
> 
> +	vq->async_last_seg_n = n_pkts_cpl;
> +
>  	if (n_pkts_put) {
> -		vq->async_pkts_inflight_n -= n_pkts_put;
> +		vq->async_pkts_inflight_n = n_inflight;
>  		__atomic_add_fetch(&vq->used->idx, n_descs,
> __ATOMIC_RELEASE);
> 
>  		vhost_vring_call_split(dev, vq);
> --
> 2.18.4

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-17  3:21   ` Xia, Chenbo
@ 2020-07-17 11:52     ` Ferruh Yigit
  0 siblings, 0 replies; 10+ messages in thread
From: Ferruh Yigit @ 2020-07-17 11:52 UTC (permalink / raw)
  To: Xia, Chenbo, Fu, Patrick, dev, maxime.coquelin; +Cc: Wang, Yinan

On 7/17/2020 4:21 AM, Xia, Chenbo wrote:
> 
>> -----Original Message-----
>> From: Fu, Patrick <patrick.fu@intel.com>
>> Sent: Wednesday, July 15, 2020 7:15 PM
>> To: dev@dpdk.org; maxime.coquelin@redhat.com; Xia, Chenbo
>> <chenbo.xia@intel.com>
>> Cc: Fu, Patrick <patrick.fu@intel.com>; Wang, Yinan <yinan.wang@intel.com>
>> Subject: [PATCH v2] vhost: support async copy free segmentations
>>
>> From: Patrick Fu <patrick.fu@intel.com>
>>
>> Vhost async enqueue assumes that all async copies should break at packet
>> boundary. i.e. if a packet is split into multiple copy segments, the async engine
>> should always report copy completion when entire packet is finished. This patch
>> removes the assumption.
>>
>> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
>>
>> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> 
> Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
> 

Applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-15 11:15 ` [dpdk-dev] [PATCH v2] " patrick.fu
  2020-07-17  3:21   ` Xia, Chenbo
@ 2020-07-20 14:58   ` Maxime Coquelin
  2020-07-20 16:49     ` Ferruh Yigit
  2020-07-21  5:52     ` Fu, Patrick
  1 sibling, 2 replies; 10+ messages in thread
From: Maxime Coquelin @ 2020-07-20 14:58 UTC (permalink / raw)
  To: patrick.fu, dev, chenbo.xia; +Cc: yinan.wang

Hi Patrick,

On 7/15/20 1:15 PM, patrick.fu@intel.com wrote:
> From: Patrick Fu <patrick.fu@intel.com>
> 
> Vhost async enqueue assumes that all async copies should break at packet
> boundary. i.e. if a packet is split into multiple copy segments, the
> async engine should always report copy completion when entire packet is
> finished. This patch removes the assumption.

Could you please rework the commit message and title?
It is hard to understand what the patch is doing and why.

Thanks in advance,
Maxime

> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
> 
> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> ---
> v2:
>  - fix an issue that can stall the async poll when the packet buffer is full
>  - rename a local variable to better reflect its usage
> 
>  lib/librte_vhost/vhost.h      |  3 +++
>  lib/librte_vhost/virtio_net.c | 17 ++++++++++++-----
>  2 files changed, 15 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 8c01cee42..0f7212f88 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -46,6 +46,8 @@
>  
>  #define MAX_PKT_BURST 32
>  
> +#define ASYNC_MAX_POLL_SEG 255
> +
>  #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
>  #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
>  
> @@ -225,6 +227,7 @@ struct vhost_virtqueue {
>  	uint64_t	*async_pending_info;
>  	uint16_t	async_pkts_idx;
>  	uint16_t	async_pkts_inflight_n;
> +	uint16_t	async_last_seg_n;
>  
>  	/* vq async features */
>  	bool		async_inorder;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 1d0be3dd4..17808ab29 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1633,6 +1633,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  	struct vhost_virtqueue *vq;
>  	uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
>  	uint16_t start_idx, pkts_idx, vq_size;
> +	uint16_t n_inflight;
>  	uint64_t *async_pending_info;
>  
>  	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
> @@ -1646,28 +1647,32 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  
>  	rte_spinlock_lock(&vq->access_lock);
>  
> +	n_inflight = vq->async_pkts_inflight_n;
>  	pkts_idx = vq->async_pkts_idx;
>  	async_pending_info = vq->async_pending_info;
>  	vq_size = vq->size;
>  	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
>  		vq_size, vq->async_pkts_inflight_n);
>  
> -	n_pkts_cpl =
> -		vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
> +	n_pkts_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
> +		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
> +		vq->async_last_seg_n;
>  
>  	rte_smp_wmb();
>  
> -	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
> +	while (likely((n_pkts_put < count) && n_inflight)) {
>  		uint64_t info = async_pending_info[
>  			(start_idx + n_pkts_put) & (vq_size - 1)];
>  		uint64_t n_segs;
>  		n_pkts_put++;
> +		n_inflight--;
>  		n_descs += info & ASYNC_PENDING_INFO_N_MSK;
>  		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
>  
>  		if (n_segs) {
> -			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
> +			if (unlikely(n_pkts_cpl < n_segs)) {
>  				n_pkts_put--;
> +				n_inflight++;
>  				n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
>  				if (n_pkts_cpl) {
>  					async_pending_info[
> @@ -1684,8 +1689,10 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		}
>  	}
>  
> +	vq->async_last_seg_n = n_pkts_cpl;
> +
>  	if (n_pkts_put) {
> -		vq->async_pkts_inflight_n -= n_pkts_put;
> +		vq->async_pkts_inflight_n = n_inflight;
>  		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
>  
>  		vhost_vring_call_split(dev, vq);
> 


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-20 14:58   ` Maxime Coquelin
@ 2020-07-20 16:49     ` Ferruh Yigit
  2020-07-21  5:52     ` Fu, Patrick
  1 sibling, 0 replies; 10+ messages in thread
From: Ferruh Yigit @ 2020-07-20 16:49 UTC (permalink / raw)
  To: Maxime Coquelin, patrick.fu, dev, chenbo.xia; +Cc: yinan.wang

On 7/20/2020 3:58 PM, Maxime Coquelin wrote:
> Hi Patrick,
> 
> On 7/15/20 1:15 PM, patrick.fu@intel.com wrote:
>> From: Patrick Fu <patrick.fu@intel.com>
>>
>> Vhost async enqueue assumes that all async copies should break at packet
>> boundary. i.e. if a packet is split into multiple copy segments, the
>> async engine should always report copy completion when entire packet is
>> finished. This patch removes the assumption.
> 
> Could you please rework the commit message and title?
> It is hard to understand what the patch is doing and why.

Existing commit dropped from next-net and patchwork status updated.

> 
> Thanks in advance,
> Maxime
> 
>> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
>>
>> Signed-off-by: Patrick Fu <patrick.fu@intel.com>

<...>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets
  2020-07-15  7:46 [dpdk-dev] [PATCH v1] vhost: support async copy free segmentations patrick.fu
  2020-07-15 11:15 ` [dpdk-dev] [PATCH v2] " patrick.fu
@ 2020-07-21  5:47 ` patrick.fu
  2020-07-21  8:40   ` Maxime Coquelin
  1 sibling, 1 reply; 10+ messages in thread
From: patrick.fu @ 2020-07-21  5:47 UTC (permalink / raw)
  To: dev, maxime.coquelin, chenbo.xia; +Cc: Patrick Fu

From: Patrick Fu <patrick.fu@intel.com>

In async enqueue copy, a packet could be split into multiple copy
segments. When polling the copy completion status, current async data
path assumes the async device callbacks are aware of the packet
boundary and return completed segments only if all segments belonging
to the same packet are done. Such an assumption is not generic to common
async devices and may degrees the copy performance if async callbacks
have to implement it in software manner.

This patch adds tracking of the completed copy segments at vhost side.
If the async copy device reports partial completion of a packet, only
vhost internal record is updated and vring status keeps unchanged
until remaining segments of the packet are also finished. The async
copy device no longer needs to care about the packet boundary.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")

Signed-off-by: Patrick Fu <patrick.fu@intel.com>
---
v2:
 - fix an issue that can stall the async poll when the packet buffer is full
v3:
 - revise commit message and title
 - rename a local variable to better reflect its usage

 lib/librte_vhost/vhost.h      |  3 +++
 lib/librte_vhost/virtio_net.c | 27 +++++++++++++++++----------
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 8c01cee42..0f7212f88 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -46,6 +46,8 @@
 
 #define MAX_PKT_BURST 32
 
+#define ASYNC_MAX_POLL_SEG 255
+
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
 #define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
 
@@ -225,6 +227,7 @@ struct vhost_virtqueue {
 	uint64_t	*async_pending_info;
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
+	uint16_t	async_last_seg_n;
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1d0be3dd4..635113cb0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1631,8 +1631,9 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
+	uint16_t n_inflight;
 	uint64_t *async_pending_info;
 
 	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
@@ -1646,46 +1647,52 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 
 	rte_spinlock_lock(&vq->access_lock);
 
+	n_inflight = vq->async_pkts_inflight_n;
 	pkts_idx = vq->async_pkts_idx;
 	async_pending_info = vq->async_pending_info;
 	vq_size = vq->size;
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	n_pkts_cpl =
-		vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
+	n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
+		0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
+		vq->async_last_seg_n;
 
 	rte_smp_wmb();
 
-	while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
+	while (likely((n_pkts_put < count) && n_inflight)) {
 		uint64_t info = async_pending_info[
 			(start_idx + n_pkts_put) & (vq_size - 1)];
 		uint64_t n_segs;
 		n_pkts_put++;
+		n_inflight--;
 		n_descs += info & ASYNC_PENDING_INFO_N_MSK;
 		n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
 
 		if (n_segs) {
-			if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
+			if (unlikely(n_segs_cpl < n_segs)) {
 				n_pkts_put--;
+				n_inflight++;
 				n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
-				if (n_pkts_cpl) {
+				if (n_segs_cpl) {
 					async_pending_info[
 						(start_idx + n_pkts_put) &
 						(vq_size - 1)] =
-					((n_segs - n_pkts_cpl) <<
+					((n_segs - n_segs_cpl) <<
 					 ASYNC_PENDING_INFO_N_SFT) |
 					(info & ASYNC_PENDING_INFO_N_MSK);
-					n_pkts_cpl = 0;
+					n_segs_cpl = 0;
 				}
 				break;
 			}
-			n_pkts_cpl -= n_segs;
+			n_segs_cpl -= n_segs;
 		}
 	}
 
+	vq->async_last_seg_n = n_segs_cpl;
+
 	if (n_pkts_put) {
-		vq->async_pkts_inflight_n -= n_pkts_put;
+		vq->async_pkts_inflight_n = n_inflight;
 		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
 
 		vhost_vring_call_split(dev, vq);
-- 
2.18.4


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: support async copy free segmentations
  2020-07-20 14:58   ` Maxime Coquelin
  2020-07-20 16:49     ` Ferruh Yigit
@ 2020-07-21  5:52     ` Fu, Patrick
  1 sibling, 0 replies; 10+ messages in thread
From: Fu, Patrick @ 2020-07-21  5:52 UTC (permalink / raw)
  To: Maxime Coquelin, dev, Xia, Chenbo; +Cc: Wang, Yinan

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Monday, July 20, 2020 10:58 PM
> To: Fu, Patrick <patrick.fu@intel.com>; dev@dpdk.org; Xia, Chenbo
> <chenbo.xia@intel.com>
> Cc: Wang, Yinan <yinan.wang@intel.com>
> Subject: Re: [PATCH v2] vhost: support async copy free segmentations
> 
> Hi Patrick,
> 
> On 7/15/20 1:15 PM, patrick.fu@intel.com wrote:
> > From: Patrick Fu <patrick.fu@intel.com>
> >
> > Vhost async enqueue assumes that all async copies should break at
> > packet boundary. i.e. if a packet is split into multiple copy
> > segments, the async engine should always report copy completion when
> > entire packet is finished. This patch removes the assumption.
> 
> Could you please rework the commit message and title?
> It is hard to understand what the patch is doing and why.

I revised the commit message and title in my v3 patch. Hope that one will be clearer.

Thanks,

Patrick


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets
  2020-07-21  5:47 ` [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets patrick.fu
@ 2020-07-21  8:40   ` Maxime Coquelin
  2020-07-21 14:57     ` Ferruh Yigit
  0 siblings, 1 reply; 10+ messages in thread
From: Maxime Coquelin @ 2020-07-21  8:40 UTC (permalink / raw)
  To: patrick.fu, dev, chenbo.xia



On 7/21/20 7:47 AM, patrick.fu@intel.com wrote:
> From: Patrick Fu <patrick.fu@intel.com>
> 
> In async enqueue copy, a packet could be split into multiple copy
> segments. When polling the copy completion status, current async data
> path assumes the async device callbacks are aware of the packet
> boundary and return completed segments only if all segments belonging
> to the same packet are done. Such an assumption is not generic to common
> async devices and may degrees the copy performance if async callbacks

s/degrees/degrades/

> have to implement it in software manner.
> 
> This patch adds tracking of the completed copy segments at vhost side.
> If the async copy device reports partial completion of a packet, only
> vhost internal record is updated and vring status keeps unchanged
> until remaining segments of the packet are also finished. The async
> copy device no longer needs to care about the packet boundary.
> 
> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
> 
> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> ---
> v2:
>  - fix an issue that can stall the async poll when the packet buffer is full
> v3:
>  - revise commit message and title
>  - rename a local variable to better reflect its usage
> 
>  lib/librte_vhost/vhost.h      |  3 +++
>  lib/librte_vhost/virtio_net.c | 27 +++++++++++++++++----------
>  2 files changed, 20 insertions(+), 10 deletions(-)

With above typo fixed (can be done when applying, no need to resend):
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets
  2020-07-21  8:40   ` Maxime Coquelin
@ 2020-07-21 14:57     ` Ferruh Yigit
  0 siblings, 0 replies; 10+ messages in thread
From: Ferruh Yigit @ 2020-07-21 14:57 UTC (permalink / raw)
  To: Maxime Coquelin, patrick.fu, dev, chenbo.xia

On 7/21/2020 9:40 AM, Maxime Coquelin wrote:
> 
> 
> On 7/21/20 7:47 AM, patrick.fu@intel.com wrote:
>> From: Patrick Fu <patrick.fu@intel.com>
>>
>> In async enqueue copy, a packet could be split into multiple copy
>> segments. When polling the copy completion status, current async data
>> path assumes the async device callbacks are aware of the packet
>> boundary and return completed segments only if all segments belonging
>> to the same packet are done. Such an assumption is not generic to common
>> async devices and may degrees the copy performance if async callbacks
> 
> s/degrees/degrades/
> 
>> have to implement it in software manner.
>>
>> This patch adds tracking of the completed copy segments at vhost side.
>> If the async copy device reports partial completion of a packet, only
>> vhost internal record is updated and vring status keeps unchanged
>> until remaining segments of the packet are also finished. The async
>> copy device no longer needs to care about the packet boundary.
>>
>> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
>>
>> Signed-off-by: Patrick Fu <patrick.fu@intel.com>
> 
> With above typo fixed (can be done when applying, no need to resend):
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> 

Applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2020-07-21 14:57 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-15  7:46 [dpdk-dev] [PATCH v1] vhost: support async copy free segmentations patrick.fu
2020-07-15 11:15 ` [dpdk-dev] [PATCH v2] " patrick.fu
2020-07-17  3:21   ` Xia, Chenbo
2020-07-17 11:52     ` Ferruh Yigit
2020-07-20 14:58   ` Maxime Coquelin
2020-07-20 16:49     ` Ferruh Yigit
2020-07-21  5:52     ` Fu, Patrick
2020-07-21  5:47 ` [dpdk-dev] [PATCH v3] vhost: fix wrong async completion of multi-seg packets patrick.fu
2020-07-21  8:40   ` Maxime Coquelin
2020-07-21 14:57     ` Ferruh Yigit

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git