patches for DPDK stable branches
* Re: [dpdk-stable] [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server mode
  2019-12-09 16:49 [dpdk-stable] [PATCH v1] net/virtio-user: fix packed ring server mode Xuan Ding
@ 2019-12-09  8:51 ` Liu, Yong
  2019-12-12 11:08   ` Ding, Xuan
  2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
  1 sibling, 1 reply; 14+ messages in thread
From: Liu, Yong @ 2019-12-09  8:51 UTC (permalink / raw)
  To: Ding, Xuan, maintainer
  Cc: dev, maxime.coquelin, Bie, Tiwei, Wang, Zhihong, Ding, Xuan, stable



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Xuan Ding
> Sent: Tuesday, December 10, 2019 12:50 AM
> To: maintainer@dpdk.org
> Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Bie, Tiwei
> <tiwei.bie@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>; stable@dpdk.org
> Subject: [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server
> mode
> 
> This patch fixes the situation where datapath does not work properly when
> vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share memory of vring. For split ring, vhost
> can read the status of discriptors directly from the available ring and
> the used ring during reconnection. Therefore, the datapath can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot get the
> status of discriptors only through the descriptor ring. By resetting Tx
> and Rx queues, the datapath can restart from the beginning.
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c      | 112 +++++++++++++++++++++++-
>  drivers/net/virtio/virtio_ethdev.h      |   3 +
>  drivers/net/virtio/virtio_user_ethdev.c |   8 ++
>  3 files changed, 121 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 044eb10a7..c0cb0f23c 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq)
>  	virtqueue_disable_intr(vq);
>  }
> 
> +static int
> +virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{

Hi Xuan,
This function is named virtio_user_reset_*, but it looks like it has no relationship with virtio_user.
Renaming it to virtqueue_reset and moving it to virtqueue.c would be more suitable.
Please also add the suffix _packed, since this function only works for packed ring.

Thanks,
Marvin

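A sketch of the renaming suggested here (v2/v3 later in this thread settle on exactly these per-queue variants in virtqueue.c):

	/* virtqueue.h: packed-ring-only reset helpers */
	int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
	int virtqueue_txvq_reset_packed(struct virtqueue *vq);
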
> +	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
> +	struct virtio_hw *hw = dev->data->dev_private;
> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +	struct virtnet_rx *rxvq;
> +	struct vq_desc_extra *dxp;
> +	unsigned int vq_size;
> +	uint16_t desc_idx, i;
> +
> +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
The virtqueue size has already been stored in vq_nentries in the virtqueue structure. Do we need to re-fetch it?

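The simplification this points at (vq_nentries is filled in once at queue setup, so there is no need to query the device again) would look roughly like:

	/* instead of: vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx); */
	uint16_t vq_size = vq->vq_nentries;	/* cached by virtio_init_queue() */
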
> +	vq->vq_packed.used_wrap_counter = 1;
> +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +	vq->vq_packed.event_flags_shadow = 0;
> +	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> +
> +	rxvq = &vq->rxq;
> +	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> +
> +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +		dxp = &vq->vq_descx[desc_idx];
> +		if (dxp->cookie != NULL) {
> +			rte_pktmbuf_free(dxp->cookie);
> +			dxp->cookie = NULL;
> +		}
> +	}
> +
> +	virtio_init_vring(vq);
> +
> +	for (i = 0; i < hw->max_queue_pairs; i++)
> +		if (rxvq->mpool != NULL)
> +			virtio_dev_rx_queue_setup_finish(dev, i);
> +

Please add braces around the multi-line loop body.

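The braced form being requested:

	for (i = 0; i < hw->max_queue_pairs; i++) {
		if (rxvq->mpool != NULL)
			virtio_dev_rx_queue_setup_finish(dev, i);
	}
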
> +	return 0;
> +}
> +
> +static int
> +virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{
> +	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
> +	struct virtio_hw *hw = dev->data->dev_private;
> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +	struct virtnet_tx *txvq;
> +	struct vq_desc_extra *dxp;
> +	unsigned int vq_size;
> +	uint16_t desc_idx;
> +
> +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
> +	vq->vq_packed.used_wrap_counter = 1;
> +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +	vq->vq_packed.event_flags_shadow = 0;
> +
> +	txvq = &vq->txq;
> +	memset(txvq->mz->addr, 0, txvq->mz->len);
> +	memset(txvq->virtio_net_hdr_mz->addr, 0,
> +		txvq->virtio_net_hdr_mz->len);
> +
> +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +		dxp = &vq->vq_descx[desc_idx];
> +		if (dxp->cookie != NULL) {
> +			rte_pktmbuf_free(dxp->cookie);
> +			dxp->cookie = NULL;
> +		}
> +	}
> +
> +	virtio_init_vring(vq);
> +
> +	return 0;
> +}
> +
> +static int
> +virtio_user_reset_queues(struct rte_eth_dev *eth_dev)
> +{
> +	uint16_t i;
> +
> +	/* Vring reset for each Tx queue and Rx queue. */
> +	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> +		virtio_user_reset_rx_queues(eth_dev, i);
> +
> +	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +		virtio_user_reset_tx_queues(eth_dev, i);
> +
> +	return 0;
> +}
> +
>  static int
>  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  {
> @@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  			goto err_vtpci_init;
>  	}
> 
> +	rte_spinlock_init(&hw->state_lock);
> +
>  	/* reset device and negotiate default features */
>  	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
>  	if (ret < 0)
> @@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>  			return -EBUSY;
>  		}
> 
> -	rte_spinlock_init(&hw->state_lock);
> -
>  	hw->use_simple_rx = 1;
> 
>  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> @@ -2421,6 +2509,26 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev
> *dev, int mask)
>  	return 0;
>  }
> 
> +int
> +virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct virtio_hw
> *hw)
> +{
> +	/* Add lock to avoid queue contention. */
> +	rte_spinlock_lock(&hw->state_lock);
> +	hw->started = 0;
> +	/*
> +	 * Waiting for datapath to complete before resetting queues.
> +	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> +	 */
> +	rte_delay_ms(1);
> +
> +	virtio_user_reset_queues(eth_dev);
> +
> +	hw->started = 1;
> +	rte_spinlock_unlock(&hw->state_lock);
> +
> +	return 0;
> +}
> +
>  static int
>  virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> *dev_info)
>  {
> diff --git a/drivers/net/virtio/virtio_ethdev.h
> b/drivers/net/virtio/virtio_ethdev.h
> index a10111758..72e9e3ff8 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -49,6 +49,9 @@
> 
>  extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
> 
> +int virtio_user_reset_device(struct rte_eth_dev *eth_dev,
> +		struct virtio_hw *hw);
> +
>  /*
>   * CQ function prototype
>   */
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index 3fc172573..49068a578 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -31,6 +31,7 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
>  	int ret;
>  	int connectfd;
>  	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> +	struct virtio_hw *hw = eth_dev->data->dev_private;
> 
>  	connectfd = accept(dev->listenfd, NULL, NULL);
>  	if (connectfd < 0)
> @@ -51,6 +52,13 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
> 
>  	dev->features &= dev->device_features;
> 
> +	/*
> +	 * * For packed ring, resetting queues
> +	 * is required in reconnection.
> +	 */
> +	if (vtpci_packed_queue(hw))
> +		virtio_user_reset_device(eth_dev, hw);
> +
>  	ret = virtio_user_start_device(dev);
>  	if (ret < 0)
>  		return -1;
> --
> 2.17.1



* [dpdk-stable] [PATCH v1] net/virtio-user: fix packed ring server mode
@ 2019-12-09 16:49 Xuan Ding
  2019-12-09  8:51 ` [dpdk-stable] [dpdk-dev] " Liu, Yong
  2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
  0 siblings, 2 replies; 14+ messages in thread
From: Xuan Ding @ 2019-12-09 16:49 UTC (permalink / raw)
  To: maintainer
  Cc: dev, maxime.coquelin, tiwei.bie, zhihong.wang, Xuan Ding, stable

This patch fixes the situation where datapath does not work properly when
vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share memory of vring. For split ring, vhost
can read the status of discriptors directly from the available ring and
the used ring during reconnection. Therefore, the datapath can continue.

But for packed ring, when reconnecting to virtio, vhost cannot get the
status of discriptors only through the descriptor ring. By resetting Tx
and Rx queues, the datapath can restart from the beginning.

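To illustrate the difference (an editorial sketch, not part of the patch; names follow the virtio ring layout):

	/*
	 * Split ring: progress is recorded in indexes that live in the
	 * shared memory itself, so a reconnecting vhost can re-read them.
	 */
	avail_idx = vring->avail->idx;	/* next descriptor virtio will post */
	used_idx  = vring->used->idx;	/* next slot vhost will complete */

	/*
	 * Packed ring: a descriptor is available only when its AVAIL/USED
	 * flag bits match a wrap counter each side keeps privately. The
	 * counters are lost across reconnect, so descriptor state cannot
	 * be recovered from the ring alone.
	 */
	is_avail = (!!(desc->flags & VRING_PACKED_DESC_F_AVAIL) == wrap_counter) &&
		   (!!(desc->flags & VRING_PACKED_DESC_F_USED) != wrap_counter);
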
Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 drivers/net/virtio/virtio_ethdev.c      | 112 +++++++++++++++++++++++-
 drivers/net/virtio/virtio_ethdev.h      |   3 +
 drivers/net/virtio/virtio_user_ethdev.c |   8 ++
 3 files changed, 121 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a7..c0cb0f23c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq)
 	virtqueue_disable_intr(vq);
 }
 
+static int
+virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	struct virtnet_rx *rxvq;
+	struct vq_desc_extra *dxp;
+	unsigned int vq_size;
+	uint16_t desc_idx, i;
+
+	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	virtio_init_vring(vq);
+
+	for (i = 0; i < hw->max_queue_pairs; i++)
+		if (rxvq->mpool != NULL)
+			virtio_dev_rx_queue_setup_finish(dev, i);
+
+	return 0;
+}
+
+static int
+virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	struct virtnet_tx *txvq;
+	struct vq_desc_extra *dxp;
+	unsigned int vq_size;
+	uint16_t desc_idx;
+
+	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	virtio_init_vring(vq);
+
+	return 0;
+}
+
+static int
+virtio_user_reset_queues(struct rte_eth_dev *eth_dev)
+{
+	uint16_t i;
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		virtio_user_reset_rx_queues(eth_dev, i);
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		virtio_user_reset_tx_queues(eth_dev, i);
+
+	return 0;
+}
+
 static int
 virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 {
@@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
@@ -2421,6 +2509,26 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	return 0;
 }
 
+int
+virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct virtio_hw *hw)
+{
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+	/*
+	 * Waiting for datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	virtio_user_reset_queues(eth_dev);
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+
+	return 0;
+}
+
 static int
 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index a10111758..72e9e3ff8 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -49,6 +49,9 @@
 
 extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
 
+int virtio_user_reset_device(struct rte_eth_dev *eth_dev,
+		struct virtio_hw *hw);
+
 /*
  * CQ function prototype
  */
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc172573..49068a578 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -31,6 +31,7 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +52,13 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/*
+	 * * For packed ring, resetting queues
+	 * is required in reconnection.
+	 */
+	if (vtpci_packed_queue(hw))
+		virtio_user_reset_device(eth_dev, hw);
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
-- 
2.17.1



* Re: [dpdk-stable] [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server mode
  2019-12-09  8:51 ` [dpdk-stable] [dpdk-dev] " Liu, Yong
@ 2019-12-12 11:08   ` Ding, Xuan
  0 siblings, 0 replies; 14+ messages in thread
From: Ding, Xuan @ 2019-12-12 11:08 UTC (permalink / raw)
  To: Liu, Yong, maintainer
  Cc: dev, maxime.coquelin, Bie, Tiwei, Wang, Zhihong, stable



> -----Original Message-----
> From: Liu, Yong <yong.liu@intel.com>
> Sent: Monday, December 9, 2019 4:52 PM
> To: Ding, Xuan <xuan.ding@intel.com>; maintainer@dpdk.org
> Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Bie, Tiwei
> <tiwei.bie@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>; stable@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server mode
> 
> 
> 
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Xuan Ding
> > Sent: Tuesday, December 10, 2019 12:50 AM
> > To: maintainer@dpdk.org
> > Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Bie, Tiwei
> > <tiwei.bie@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Ding,
> > Xuan <xuan.ding@intel.com>; stable@dpdk.org
> > Subject: [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server
> > mode
> >
> > This patch fixes the situation where datapath does not work properly
> > when vhost reconnects to virtio in server mode with packed ring.
> >
> > Currently, virtio and vhost share memory of vring. For split ring,
> > vhost can read the status of discriptors directly from the available
> > ring and the used ring during reconnection. Therefore, the datapath can
> continue.
> >
> > But for packed ring, when reconnecting to virtio, vhost cannot get the
> > status of discriptors only through the descriptor ring. By resetting
> > Tx and Rx queues, the datapath can restart from the beginning.
> >
> > Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> > ---
> >  drivers/net/virtio/virtio_ethdev.c      | 112 +++++++++++++++++++++++-
> >  drivers/net/virtio/virtio_ethdev.h      |   3 +
> >  drivers/net/virtio/virtio_user_ethdev.c |   8 ++
> >  3 files changed, 121 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_ethdev.c
> > b/drivers/net/virtio/virtio_ethdev.c
> > index 044eb10a7..c0cb0f23c 100644
> > --- a/drivers/net/virtio/virtio_ethdev.c
> > +++ b/drivers/net/virtio/virtio_ethdev.c
> > @@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq)
> >  	virtqueue_disable_intr(vq);
> >  }
> >
> > +static int
> > +virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t
> > +queue_idx) {
> 
> Hi Xuan,
> This function is named virtio_user_reset_*, but it looks like it has no
> relationship with virtio_user.
> Renaming it to virtqueue_reset and moving it to virtqueue.c would be more
> suitable.
> Please also add the suffix _packed, since this function only works for
> packed ring.
> 
> Thanks,
> Marvin
> 

Hi Marvin, this function only operates on the virtqueue, so moving it to virtqueue.c is better.
The two functions will be renamed to virtqueue_rxvq_reset_packed and virtqueue_txvq_reset_packed, respectively.

Thank you for your suggestion.
Xuan

> > +	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
> > +	struct virtio_hw *hw = dev->data->dev_private;
> > +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > +	struct virtnet_rx *rxvq;
> > +	struct vq_desc_extra *dxp;
> > +	unsigned int vq_size;
> > +	uint16_t desc_idx, i;
> > +
> > +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> > +
> The virtqueue size has already been stored in vq_nentries in the virtqueue
> structure. Do we need to re-fetch it?
> 

The virtqueue size doesn't need to be re-fetched, so vq->vq_nentries will be used instead.

> > +	vq->vq_packed.used_wrap_counter = 1;
> > +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> > +	vq->vq_packed.event_flags_shadow = 0;
> > +	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> > +
> > +	rxvq = &vq->rxq;
> > +	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> > +
> > +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> > +		dxp = &vq->vq_descx[desc_idx];
> > +		if (dxp->cookie != NULL) {
> > +			rte_pktmbuf_free(dxp->cookie);
> > +			dxp->cookie = NULL;
> > +		}
> > +	}
> > +
> > +	virtio_init_vring(vq);
> > +
> > +	for (i = 0; i < hw->max_queue_pairs; i++)
> > +		if (rxvq->mpool != NULL)
> > +			virtio_dev_rx_queue_setup_finish(dev, i);
> > +
> 
> Please add braces around the multi-line loop body.

Thank you for pointing this out.

> 
> > +	return 0;
> > +}
> > +
> > +static int
> > +virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t
> > +queue_idx) {
> > +	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
> > +	struct virtio_hw *hw = dev->data->dev_private;
> > +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> > +	struct virtnet_tx *txvq;
> > +	struct vq_desc_extra *dxp;
> > +	unsigned int vq_size;
> > +	uint16_t desc_idx;
> > +
> > +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> > +
> > +	vq->vq_packed.used_wrap_counter = 1;
> > +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> > +	vq->vq_packed.event_flags_shadow = 0;
> > +
> > +	txvq = &vq->txq;
> > +	memset(txvq->mz->addr, 0, txvq->mz->len);
> > +	memset(txvq->virtio_net_hdr_mz->addr, 0,
> > +		txvq->virtio_net_hdr_mz->len);
> > +
> > +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> > +		dxp = &vq->vq_descx[desc_idx];
> > +		if (dxp->cookie != NULL) {
> > +			rte_pktmbuf_free(dxp->cookie);
> > +			dxp->cookie = NULL;
> > +		}
> > +	}
> > +
> > +	virtio_init_vring(vq);
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +virtio_user_reset_queues(struct rte_eth_dev *eth_dev) {
> > +	uint16_t i;
> > +
> > +	/* Vring reset for each Tx queue and Rx queue. */
> > +	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> > +		virtio_user_reset_rx_queues(eth_dev, i);
> > +
> > +	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> > +		virtio_user_reset_tx_queues(eth_dev, i);
> > +
> > +	return 0;
> > +}
> > +
> >  static int
> >  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
> > { @@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev
> > *eth_dev)
> >  			goto err_vtpci_init;
> >  	}
> >
> > +	rte_spinlock_init(&hw->state_lock);
> > +
> >  	/* reset device and negotiate default features */
> >  	ret = virtio_init_device(eth_dev,
> > VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> >  	if (ret < 0)
> > @@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> >  			return -EBUSY;
> >  		}
> >
> > -	rte_spinlock_init(&hw->state_lock);
> > -
> >  	hw->use_simple_rx = 1;
> >
> >  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { @@ -2421,6
> +2509,26
> > @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
> >  	return 0;
> >  }
> >
> > +int
> > +virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct
> > +virtio_hw
> > *hw)
> > +{
> > +	/* Add lock to avoid queue contention. */
> > +	rte_spinlock_lock(&hw->state_lock);
> > +	hw->started = 0;
> > +	/*
> > +	 * Waiting for datapath to complete before resetting queues.
> > +	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> > +	 */
> > +	rte_delay_ms(1);
> > +
> > +	virtio_user_reset_queues(eth_dev);
> > +
> > +	hw->started = 1;
> > +	rte_spinlock_unlock(&hw->state_lock);
> > +
> > +	return 0;
> > +}
> > +
> >  static int
> >  virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> > *dev_info)
> >  {
> > diff --git a/drivers/net/virtio/virtio_ethdev.h
> > b/drivers/net/virtio/virtio_ethdev.h
> > index a10111758..72e9e3ff8 100644
> > --- a/drivers/net/virtio/virtio_ethdev.h
> > +++ b/drivers/net/virtio/virtio_ethdev.h
> > @@ -49,6 +49,9 @@
> >
> >  extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
> >
> > +int virtio_user_reset_device(struct rte_eth_dev *eth_dev,
> > +		struct virtio_hw *hw);
> > +
> >  /*
> >   * CQ function prototype
> >   */
> > diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> > b/drivers/net/virtio/virtio_user_ethdev.c
> > index 3fc172573..49068a578 100644
> > --- a/drivers/net/virtio/virtio_user_ethdev.c
> > +++ b/drivers/net/virtio/virtio_user_ethdev.c
> > @@ -31,6 +31,7 @@ virtio_user_server_reconnect(struct virtio_user_dev
> > *dev)
> >  	int ret;
> >  	int connectfd;
> >  	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> > +	struct virtio_hw *hw = eth_dev->data->dev_private;
> >
> >  	connectfd = accept(dev->listenfd, NULL, NULL);
> >  	if (connectfd < 0)
> > @@ -51,6 +52,13 @@ virtio_user_server_reconnect(struct virtio_user_dev
> > *dev)
> >
> >  	dev->features &= dev->device_features;
> >
> > +	/*
> > +	 * * For packed ring, resetting queues
> > +	 * is required in reconnection.
> > +	 */
> > +	if (vtpci_packed_queue(hw))
> > +		virtio_user_reset_device(eth_dev, hw);
> > +
> >  	ret = virtio_user_start_device(dev);
> >  	if (ret < 0)
> >  		return -1;
> > --
> > 2.17.1



* [dpdk-stable] [PATCH v3] net/virtio-user: fix packed ring server mode
  2019-12-09 16:49 [dpdk-stable] [PATCH v1] net/virtio-user: fix packed ring server mode Xuan Ding
  2019-12-09  8:51 ` [dpdk-stable] [dpdk-dev] " Liu, Yong
@ 2019-12-18  2:24 ` Xuan Ding
  2019-12-18  2:25   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
  2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
  1 sibling, 2 replies; 14+ messages in thread
From: Xuan Ding @ 2019-12-18  2:24 UTC (permalink / raw)
  To: maxime.coquelin, tiwei.bie, zhihong.wang, yong.liu; +Cc: dev, xuan.ding, stable

This patch fixes the situation where datapath does not work properly when
vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share memory of vring. For split ring, vhost
can read the status of discriptors directly from the available ring and
the used ring during reconnection. Therefore, the datapath can continue.

But for packed ring, when reconnecting to virtio, vhost cannot get the
status of discriptors only through the descriptor ring. By resetting Tx
and Rx queues, the datapath can restart from the beginning.

Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org

Signed-off-by: Xuan Ding <xuan.ding@intel.com>

v3:
* Removed an extra asterisk from a comment.
* Renamed device reset function and moved it to virtio_user_ethdev.c.

v2:
* Renamed queue reset functions and moved them to virtqueue.c.
---
 drivers/net/virtio/virtio_ethdev.c      |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
 drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h          |  4 ++
 4 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a7..f9d0ea70d 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc172573..425f48230 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -25,12 +25,48 @@
 #define virtio_user_get_dev(hw) \
 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct virtnet_tx *txvq;
+	uint16_t i;
+
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Waiting for datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxvq = dev->data->rx_queues[i];
+		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtio_dev_rx_queue_setup_finish(dev, i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txvq = dev->data->tx_queues[i];
+		virtqueue_txvq_reset_packed(txvq->vq);
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/* For packed ring, resetting queues is required in reconnection. */
+	if (vtpci_packed_queue(hw))
+		virtio_user_reset_queues_packed(eth_dev);
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587..0b4e3bf3e 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_rx *rxvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_tx *txvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8d7f197b1..58ad7309a 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.17.1



* Re: [dpdk-stable] [dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode
  2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
@ 2019-12-18  2:25   ` Ye Xiaolong
  2019-12-18  2:38     ` Ding, Xuan
  2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
  1 sibling, 1 reply; 14+ messages in thread
From: Ye Xiaolong @ 2019-12-18  2:25 UTC (permalink / raw)
  To: Xuan Ding; +Cc: maxime.coquelin, tiwei.bie, zhihong.wang, yong.liu, dev, stable

Hi, Xuan

On 12/18, Xuan Ding wrote:
>This patch fixes the situation where datapath does not work properly when
>vhost reconnects to virtio in server mode with packed ring.
>
>Currently, virtio and vhost share memory of vring. For split ring, vhost
>can read the status of discriptors directly from the available ring and
>the used ring during reconnection. Therefore, the datapath can continue.
>
>But for packed ring, when reconnecting to virtio, vhost cannot get the
>status of discriptors only through the descriptor ring. By resetting Tx
>and Rx queues, the datapath can restart from the beginning.
>
>Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
>Cc: stable@dpdk.org
>
>Signed-off-by: Xuan Ding <xuan.ding@intel.com>
>
>v3:
>* Removed an extra asterisk from a comment.
>* Renamed device reset function and moved it to virtio_user_ethdev.c.
>
>v2:
>* Renamed queue reset functions and moved them to virtqueue.c.

Please put this change log below the '---' marker, so it won't be shown in the
commit log when you apply the patch with `git am`.

Thanks,
Xiaolong

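For reference, a sketch of the layout being asked for ('git am' keeps everything above the '---' marker and drops the notes below it):

	Signed-off-by: Xuan Ding <xuan.ding@intel.com>
	---
	v3:
	* Removed an extra asterisk from a comment.
	* Renamed device reset function and moved it to virtio_user_ethdev.c.

	 drivers/net/virtio/virtio_ethdev.c | 4 +-
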
>---
> drivers/net/virtio/virtio_ethdev.c      |  4 +-
> drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
> drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> drivers/net/virtio/virtqueue.h          |  4 ++
> 4 files changed, 117 insertions(+), 2 deletions(-)
>
>diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>index 044eb10a7..f9d0ea70d 100644
>--- a/drivers/net/virtio/virtio_ethdev.c
>+++ b/drivers/net/virtio/virtio_ethdev.c
>@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> 			goto err_vtpci_init;
> 	}
> 
>+	rte_spinlock_init(&hw->state_lock);
>+
> 	/* reset device and negotiate default features */
> 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> 	if (ret < 0)
>@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> 			return -EBUSY;
> 		}
> 
>-	rte_spinlock_init(&hw->state_lock);
>-
> 	hw->use_simple_rx = 1;
> 
> 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
>index 3fc172573..425f48230 100644
>--- a/drivers/net/virtio/virtio_user_ethdev.c
>+++ b/drivers/net/virtio/virtio_user_ethdev.c
>@@ -25,12 +25,48 @@
> #define virtio_user_get_dev(hw) \
> 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
> 
>+static void
>+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
>+{
>+	struct virtio_hw *hw = dev->data->dev_private;
>+	struct virtnet_rx *rxvq;
>+	struct virtnet_tx *txvq;
>+	uint16_t i;
>+
>+	/* Add lock to avoid queue contention. */
>+	rte_spinlock_lock(&hw->state_lock);
>+	hw->started = 0;
>+
>+	/*
>+	 * Waiting for datapath to complete before resetting queues.
>+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
>+	 */
>+	rte_delay_ms(1);
>+
>+	/* Vring reset for each Tx queue and Rx queue. */
>+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+		rxvq = dev->data->rx_queues[i];
>+		virtqueue_rxvq_reset_packed(rxvq->vq);
>+		virtio_dev_rx_queue_setup_finish(dev, i);
>+	}
>+
>+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>+		txvq = dev->data->tx_queues[i];
>+		virtqueue_txvq_reset_packed(txvq->vq);
>+	}
>+
>+	hw->started = 1;
>+	rte_spinlock_unlock(&hw->state_lock);
>+}
>+
>+
> static int
> virtio_user_server_reconnect(struct virtio_user_dev *dev)
> {
> 	int ret;
> 	int connectfd;
> 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
>+	struct virtio_hw *hw = eth_dev->data->dev_private;
> 
> 	connectfd = accept(dev->listenfd, NULL, NULL);
> 	if (connectfd < 0)
>@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
> 
> 	dev->features &= dev->device_features;
> 
>+	/* For packed ring, resetting queues is required in reconnection. */
>+	if (vtpci_packed_queue(hw))
>+		virtio_user_reset_queues_packed(eth_dev);
>+
> 	ret = virtio_user_start_device(dev);
> 	if (ret < 0)
> 		return -1;
>diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
>index 5ff1e3587..0b4e3bf3e 100644
>--- a/drivers/net/virtio/virtqueue.c
>+++ b/drivers/net/virtio/virtqueue.c
>@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> 	else
> 		virtqueue_rxvq_flush_split(vq);
> }
>+
>+int
>+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
>+{
>+	int size = vq->vq_nentries;
>+	struct vq_desc_extra *dxp;
>+	struct virtnet_rx *rxvq;
>+	uint16_t desc_idx;
>+
>+	vq->vq_used_cons_idx = 0;
>+	vq->vq_desc_head_idx = 0;
>+	vq->vq_avail_idx = 0;
>+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
>+	vq->vq_free_cnt = vq->vq_nentries;
>+
>+	vq->vq_packed.used_wrap_counter = 1;
>+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
>+	vq->vq_packed.event_flags_shadow = 0;
>+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
>+
>+	rxvq = &vq->rxq;
>+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
>+
>+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>+		dxp = &vq->vq_descx[desc_idx];
>+		if (dxp->cookie != NULL) {
>+			rte_pktmbuf_free(dxp->cookie);
>+			dxp->cookie = NULL;
>+		}
>+	}
>+
>+	vring_desc_init_packed(vq, size);
>+
>+	return 0;
>+}
>+
>+int
>+virtqueue_txvq_reset_packed(struct virtqueue *vq)
>+{
>+	int size = vq->vq_nentries;
>+	struct vq_desc_extra *dxp;
>+	struct virtnet_tx *txvq;
>+	uint16_t desc_idx;
>+
>+	vq->vq_used_cons_idx = 0;
>+	vq->vq_desc_head_idx = 0;
>+	vq->vq_avail_idx = 0;
>+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
>+	vq->vq_free_cnt = vq->vq_nentries;
>+
>+	vq->vq_packed.used_wrap_counter = 1;
>+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
>+	vq->vq_packed.event_flags_shadow = 0;
>+
>+	txvq = &vq->txq;
>+	memset(txvq->mz->addr, 0, txvq->mz->len);
>+	memset(txvq->virtio_net_hdr_mz->addr, 0,
>+		txvq->virtio_net_hdr_mz->len);
>+
>+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>+		dxp = &vq->vq_descx[desc_idx];
>+		if (dxp->cookie != NULL) {
>+			rte_pktmbuf_free(dxp->cookie);
>+			dxp->cookie = NULL;
>+		}
>+	}
>+
>+	vring_desc_init_packed(vq, size);
>+
>+	return 0;
>+}
>diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>index 8d7f197b1..58ad7309a 100644
>--- a/drivers/net/virtio/virtqueue.h
>+++ b/drivers/net/virtio/virtqueue.h
>@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
> /* Flush the elements in the used ring. */
> void virtqueue_rxvq_flush(struct virtqueue *vq);
> 
>+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
>+
>+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
>+
> static inline int
> virtqueue_full(const struct virtqueue *vq)
> {
>-- 
>2.17.1
>


* Re: [dpdk-stable] [dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode
  2019-12-18  2:25   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
@ 2019-12-18  2:38     ` Ding, Xuan
  0 siblings, 0 replies; 14+ messages in thread
From: Ding, Xuan @ 2019-12-18  2:38 UTC (permalink / raw)
  To: Ye, Xiaolong
  Cc: maxime.coquelin, Bie, Tiwei, Wang, Zhihong, Liu, Yong, dev, stable



> -----Original Message-----
> From: Ye, Xiaolong <xiaolong.ye@intel.com>
> Sent: Wednesday, December 18, 2019 10:25 AM
> To: Ding, Xuan <xuan.ding@intel.com>
> Cc: maxime.coquelin@redhat.com; Bie, Tiwei <tiwei.bie@intel.com>; Wang,
> Zhihong <zhihong.wang@intel.com>; Liu, Yong <yong.liu@intel.com>;
> dev@dpdk.org; stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode
> 
> Hi, Xuan
> 
> On 12/18, Xuan Ding wrote:
> >This patch fixes the situation where datapath does not work properly
> >when vhost reconnects to virtio in server mode with packed ring.
> >
> >Currently, virtio and vhost share memory of vring. For split ring,
> >vhost can read the status of discriptors directly from the available
> >ring and the used ring during reconnection. Therefore, the datapath can
> continue.
> >
> >But for packed ring, when reconnecting to virtio, vhost cannot get the
> >status of discriptors only through the descriptor ring. By resetting Tx
> >and Rx queues, the datapath can restart from the beginning.
> >
> >Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> >Cc: stable@dpdk.org
> >
> >Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> >
> >v3:
> >* Removed an extra asterisk from a comment.
> >* Renamed device reset function and moved it to virtio_user_ethdev.c.
> >
> >v2:
> >* Renamed queue reset functions and moved them to virtqueue.c.
> 
> Please put this change log below the '---' marker, so it won't be shown in the
> commit log when you apply the patch with `git am`.
> 
> Thanks,
> Xiaolong

Hi, Xiaolong,
Thank you for your advice; I will change it in the next version.

Best regards,
Xuan

> 
> >---
> > drivers/net/virtio/virtio_ethdev.c      |  4 +-
> > drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
> > drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> > drivers/net/virtio/virtqueue.h          |  4 ++
> > 4 files changed, 117 insertions(+), 2 deletions(-)
> >
> >diff --git a/drivers/net/virtio/virtio_ethdev.c
> >b/drivers/net/virtio/virtio_ethdev.c
> >index 044eb10a7..f9d0ea70d 100644
> >--- a/drivers/net/virtio/virtio_ethdev.c
> >+++ b/drivers/net/virtio/virtio_ethdev.c
> >@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> > 			goto err_vtpci_init;
> > 	}
> >
> >+	rte_spinlock_init(&hw->state_lock);
> >+
> > 	/* reset device and negotiate default features */
> > 	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> > 	if (ret < 0)
> >@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> > 			return -EBUSY;
> > 		}
> >
> >-	rte_spinlock_init(&hw->state_lock);
> >-
> > 	hw->use_simple_rx = 1;
> >
> > 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { diff --git
> >a/drivers/net/virtio/virtio_user_ethdev.c
> >b/drivers/net/virtio/virtio_user_ethdev.c
> >index 3fc172573..425f48230 100644
> >--- a/drivers/net/virtio/virtio_user_ethdev.c
> >+++ b/drivers/net/virtio/virtio_user_ethdev.c
> >@@ -25,12 +25,48 @@
> > #define virtio_user_get_dev(hw) \
> > 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
> >
> >+static void
> >+virtio_user_reset_queues_packed(struct rte_eth_dev *dev) {
> >+	struct virtio_hw *hw = dev->data->dev_private;
> >+	struct virtnet_rx *rxvq;
> >+	struct virtnet_tx *txvq;
> >+	uint16_t i;
> >+
> >+	/* Add lock to avoid queue contention. */
> >+	rte_spinlock_lock(&hw->state_lock);
> >+	hw->started = 0;
> >+
> >+	/*
> >+	 * Waiting for datapath to complete before resetting queues.
> >+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> >+	 */
> >+	rte_delay_ms(1);
> >+
> >+	/* Vring reset for each Tx queue and Rx queue. */
> >+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> >+		rxvq = dev->data->rx_queues[i];
> >+		virtqueue_rxvq_reset_packed(rxvq->vq);
> >+		virtio_dev_rx_queue_setup_finish(dev, i);
> >+	}
> >+
> >+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> >+		txvq = dev->data->tx_queues[i];
> >+		virtqueue_txvq_reset_packed(txvq->vq);
> >+	}
> >+
> >+	hw->started = 1;
> >+	rte_spinlock_unlock(&hw->state_lock);
> >+}
> >+
> >+
> > static int
> > virtio_user_server_reconnect(struct virtio_user_dev *dev) {
> > 	int ret;
> > 	int connectfd;
> > 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> >+	struct virtio_hw *hw = eth_dev->data->dev_private;
> >
> > 	connectfd = accept(dev->listenfd, NULL, NULL);
> > 	if (connectfd < 0)
> >@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev
> >*dev)
> >
> > 	dev->features &= dev->device_features;
> >
> >+	/* For packed ring, resetting queues is required in reconnection. */
> >+	if (vtpci_packed_queue(hw))
> >+		virtio_user_reset_queues_packed(eth_dev);
> >+
> > 	ret = virtio_user_start_device(dev);
> > 	if (ret < 0)
> > 		return -1;
> >diff --git a/drivers/net/virtio/virtqueue.c
> >b/drivers/net/virtio/virtqueue.c index 5ff1e3587..0b4e3bf3e 100644
> >--- a/drivers/net/virtio/virtqueue.c
> >+++ b/drivers/net/virtio/virtqueue.c
> >@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> > 	else
> > 		virtqueue_rxvq_flush_split(vq);
> > }
> >+
> >+int
> >+virtqueue_rxvq_reset_packed(struct virtqueue *vq) {
> >+	int size = vq->vq_nentries;
> >+	struct vq_desc_extra *dxp;
> >+	struct virtnet_rx *rxvq;
> >+	uint16_t desc_idx;
> >+
> >+	vq->vq_used_cons_idx = 0;
> >+	vq->vq_desc_head_idx = 0;
> >+	vq->vq_avail_idx = 0;
> >+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
> >+	vq->vq_free_cnt = vq->vq_nentries;
> >+
> >+	vq->vq_packed.used_wrap_counter = 1;
> >+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> >+	vq->vq_packed.event_flags_shadow = 0;
> >+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> >+
> >+	rxvq = &vq->rxq;
> >+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> >+
> >+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> >+		dxp = &vq->vq_descx[desc_idx];
> >+		if (dxp->cookie != NULL) {
> >+			rte_pktmbuf_free(dxp->cookie);
> >+			dxp->cookie = NULL;
> >+		}
> >+	}
> >+
> >+	vring_desc_init_packed(vq, size);
> >+
> >+	return 0;
> >+}
> >+
> >+int
> >+virtqueue_txvq_reset_packed(struct virtqueue *vq) {
> >+	int size = vq->vq_nentries;
> >+	struct vq_desc_extra *dxp;
> >+	struct virtnet_tx *txvq;
> >+	uint16_t desc_idx;
> >+
> >+	vq->vq_used_cons_idx = 0;
> >+	vq->vq_desc_head_idx = 0;
> >+	vq->vq_avail_idx = 0;
> >+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
> >+	vq->vq_free_cnt = vq->vq_nentries;
> >+
> >+	vq->vq_packed.used_wrap_counter = 1;
> >+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> >+	vq->vq_packed.event_flags_shadow = 0;
> >+
> >+	txvq = &vq->txq;
> >+	memset(txvq->mz->addr, 0, txvq->mz->len);
> >+	memset(txvq->virtio_net_hdr_mz->addr, 0,
> >+		txvq->virtio_net_hdr_mz->len);
> >+
> >+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> >+		dxp = &vq->vq_descx[desc_idx];
> >+		if (dxp->cookie != NULL) {
> >+			rte_pktmbuf_free(dxp->cookie);
> >+			dxp->cookie = NULL;
> >+		}
> >+	}
> >+
> >+	vring_desc_init_packed(vq, size);
> >+
> >+	return 0;
> >+}
> >diff --git a/drivers/net/virtio/virtqueue.h
> >b/drivers/net/virtio/virtqueue.h index 8d7f197b1..58ad7309a 100644
> >--- a/drivers/net/virtio/virtqueue.h
> >+++ b/drivers/net/virtio/virtqueue.h
> >@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct
> >virtqueue *vq);
> > /* Flush the elements in the used ring. */  void
> >virtqueue_rxvq_flush(struct virtqueue *vq);
> >
> >+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
> >+
> >+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
> >+
> > static inline int
> > virtqueue_full(const struct virtqueue *vq)  {
> >--
> >2.17.1
> >


* [dpdk-stable] [PATCH v4] net/virtio-user: fix packed ring server mode
  2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
  2019-12-18  2:25   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
@ 2019-12-23  7:25   ` Xuan Ding
  2020-01-14 15:04     ` Maxime Coquelin
  2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
  1 sibling, 2 replies; 14+ messages in thread
From: Xuan Ding @ 2019-12-23  7:25 UTC (permalink / raw)
  To: maxime.coquelin, tiwei.bie, zhihong.wang, yong.liu, xiaolong.ye
  Cc: dev, Xuan Ding, stable

This patch fixes the situation where datapath does not work properly when
vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share memory of vring. For split ring, vhost
can read the status of discriptors directly from the available ring and
the used ring during reconnection. Therefore, the datapath can continue.

But for packed ring, when reconnecting to virtio, vhost cannot get the
status of discriptors only through the descriptor ring. By resetting Tx
and Rx queues, the datapath can restart from the beginning.

Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---

v4:
* Moved change log below '---' marker.

v3:
* Removed an extra asterisk from a comment.
* Renamed device reset function and moved it to virtio_user_ethdev.c.

v2:
* Renamed queue reset functions and moved them to virtqueue.c.

 drivers/net/virtio/virtio_ethdev.c      |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
 drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h          |  4 ++
 4 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a7..f9d0ea70d 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc172573..425f48230 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -25,12 +25,48 @@
 #define virtio_user_get_dev(hw) \
 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct virtnet_tx *txvq;
+	uint16_t i;
+
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Waiting for datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxvq = dev->data->rx_queues[i];
+		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtio_dev_rx_queue_setup_finish(dev, i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txvq = dev->data->tx_queues[i];
+		virtqueue_txvq_reset_packed(txvq->vq);
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/* For packed ring, resetting queues is required in reconnection. */
+	if (vtpci_packed_queue(hw))
+		virtio_user_reset_queues_packed(eth_dev);
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587..0b4e3bf3e 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_rx *rxvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_tx *txvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8d7f197b1..58ad7309a 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.17.1

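An editorial note on the pause pattern above: virtio_user_reset_queues_packed() relies on the datapath honouring hw->started. A rough sketch of that consumer side, mirroring the early-return check the virtio burst functions already carry (body abbreviated):

	uint16_t
	virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	{
		struct virtnet_rx *rxvq = rx_queue;
		struct virtio_hw *hw = rxvq->vq->hw;
		uint16_t nb_rx = 0;

		if (unlikely(hw->started == 0))	/* reset in progress */
			return nb_rx;

		/* ... normal receive path ... */
		return nb_rx;
	}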


* Re: [dpdk-stable] [PATCH v4] net/virtio-user: fix packed ring server mode
  2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
@ 2020-01-14 15:04     ` Maxime Coquelin
  2020-01-15  6:08       ` Ding, Xuan
  2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
  1 sibling, 1 reply; 14+ messages in thread
From: Maxime Coquelin @ 2020-01-14 15:04 UTC (permalink / raw)
  To: Xuan Ding, tiwei.bie, zhihong.wang, yong.liu, xiaolong.ye; +Cc: dev, stable



On 12/23/19 8:25 AM, Xuan Ding wrote:
> This patch fixes the situation where datapath does not work properly when
> vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share memory of vring. For split ring, vhost
> can read the status of discriptors directly from the available ring and

s/discriptors/descriptors/

> the used ring during reconnection. Therefore, the datapath can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot get the
> status of discriptors only through the descriptor ring. By resetting Tx

s/discriptor only through/descriptor via/

> and Rx queues, the datapath can restart from the beginning.
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> 
> v4:
> * Moved change log below '---' marker.
> 
> v3:
> * Removed an extra asterisk from a comment.
> * Renamed device reset function and moved it to virtio_user_ethdev.c.
> 
> v2:
> * Renamed queue reset functions and moved them to virtqueue.c.
> 
>  drivers/net/virtio/virtio_ethdev.c      |  4 +-
>  drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
>  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
>  drivers/net/virtio/virtqueue.h          |  4 ++
>  4 files changed, 117 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 044eb10a7..f9d0ea70d 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  			goto err_vtpci_init;
>  	}
>  
> +	rte_spinlock_init(&hw->state_lock);
> +
>  	/* reset device and negotiate default features */
>  	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
>  	if (ret < 0)
> @@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>  			return -EBUSY;
>  		}
>  
> -	rte_spinlock_init(&hw->state_lock);
> -
>  	hw->use_simple_rx = 1;
>  
>  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
> index 3fc172573..425f48230 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -25,12 +25,48 @@
>  #define virtio_user_get_dev(hw) \
>  	((struct virtio_user_dev *)(hw)->virtio_user_dev)
>  
> +static void
> +virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
> +{
> +	struct virtio_hw *hw = dev->data->dev_private;
> +	struct virtnet_rx *rxvq;
> +	struct virtnet_tx *txvq;
> +	uint16_t i;
> +
> +	/* Add lock to avoid queue contention. */
> +	rte_spinlock_lock(&hw->state_lock);
> +	hw->started = 0;
> +
> +	/*
> +	 * Waiting for datapath to complete before resetting queues.
> +	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> +	 */
> +	rte_delay_ms(1);
> +
> +	/* Vring reset for each Tx queue and Rx queue. */
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		rxvq = dev->data->rx_queues[i];
> +		virtqueue_rxvq_reset_packed(rxvq->vq);
> +		virtio_dev_rx_queue_setup_finish(dev, i);
> +	}
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		txvq = dev->data->tx_queues[i];
> +		virtqueue_txvq_reset_packed(txvq->vq);
> +	}
> +
> +	hw->started = 1;
> +	rte_spinlock_unlock(&hw->state_lock);
> +}
> +
> +
>  static int
>  virtio_user_server_reconnect(struct virtio_user_dev *dev)
>  {
>  	int ret;
>  	int connectfd;
>  	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> +	struct virtio_hw *hw = eth_dev->data->dev_private;
>  
>  	connectfd = accept(dev->listenfd, NULL, NULL);
>  	if (connectfd < 0)
> @@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
>  
>  	dev->features &= dev->device_features;
>  
> +	/* For packed ring, resetting queues is required in reconnection. */
> +	if (vtpci_packed_queue(hw))
> +		virtio_user_reset_queues_packed(eth_dev);

I think we should log an error here to state that some packets are
likely to be dropped.

Other than that, I don't think we have any other choice than doing this
right now, so I'll ack your patch once the commit message is fixed and
an error message is printed on vring reset.

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-stable] [PATCH v4] net/virtio-user: fix packed ring server mode
  2020-01-14 15:04     ` Maxime Coquelin
@ 2020-01-15  6:08       ` Ding, Xuan
  0 siblings, 0 replies; 14+ messages in thread
From: Ding, Xuan @ 2020-01-15  6:08 UTC (permalink / raw)
  To: Maxime Coquelin, Bie, Tiwei, Wang, Zhihong, Liu, Yong, Ye, Xiaolong
  Cc: dev, stable

Hi Maxime, 

Replies are inline.

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, January 14, 2020 11:04 PM
> To: Ding, Xuan <xuan.ding@intel.com>; Bie, Tiwei <tiwei.bie@intel.com>; Wang,
> Zhihong <zhihong.wang@intel.com>; Liu, Yong <yong.liu@intel.com>; Ye,
> Xiaolong <xiaolong.ye@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: Re: [PATCH v4] net/virtio-user: fix packed ring server mode
> 
> 
> 
> On 12/23/19 8:25 AM, Xuan Ding wrote:
> > This patch fixes the situation where datapath does not work properly
> > when vhost reconnects to virtio in server mode with packed ring.
> >
> > Currently, virtio and vhost share memory of vring. For split ring,
> > vhost can read the status of discriptors directly from the available
> > ring and
> 
> s/discriptors/descriptors/
> 
> > the used ring during reconnection. Therefore, the datapath can continue.
> >
> > But for packed ring, when reconnecting to virtio, vhost cannot get the
> > status of discriptors only through the descriptor ring. By resetting
> > Tx
> 
> s/discriptor only through/descriptor via/
> 

Thanks, this will be corrected in the new version.

> > and Rx queues, the datapath can restart from the beginning.
> >
> > Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> > ---
> >
> > v4:
> > * Moved change log below '---' marker.
> >
> > v3:
> > * Removed an extra asterisk from a comment.
> > * Renamed device reset function and moved it to virtio_user_ethdev.c.
> >
> > v2:
> > * Renamed queue reset functions and moved them to virtqueue.c.
> >
> >  drivers/net/virtio/virtio_ethdev.c      |  4 +-
> >  drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
> >  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> >  drivers/net/virtio/virtqueue.h          |  4 ++
> >  4 files changed, 117 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_ethdev.c
> > b/drivers/net/virtio/virtio_ethdev.c
> > index 044eb10a7..f9d0ea70d 100644
> > --- a/drivers/net/virtio/virtio_ethdev.c
> > +++ b/drivers/net/virtio/virtio_ethdev.c
> > @@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> >  			goto err_vtpci_init;
> >  	}
> >
> > +	rte_spinlock_init(&hw->state_lock);
> > +
> >  	/* reset device and negotiate default features */
> >  	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> >  	if (ret < 0)
> > @@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> >  			return -EBUSY;
> >  		}
> >
> > -	rte_spinlock_init(&hw->state_lock);
> > -
> >  	hw->use_simple_rx = 1;
> >
> >  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> > diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
> > index 3fc172573..425f48230 100644
> > --- a/drivers/net/virtio/virtio_user_ethdev.c
> > +++ b/drivers/net/virtio/virtio_user_ethdev.c
> > @@ -25,12 +25,48 @@
> >  #define virtio_user_get_dev(hw) \
> >  	((struct virtio_user_dev *)(hw)->virtio_user_dev)
> >
> > +static void
> > +virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
> > +{
> > +	struct virtio_hw *hw = dev->data->dev_private;
> > +	struct virtnet_rx *rxvq;
> > +	struct virtnet_tx *txvq;
> > +	uint16_t i;
> > +
> > +	/* Add lock to avoid queue contention. */
> > +	rte_spinlock_lock(&hw->state_lock);
> > +	hw->started = 0;
> > +
> > +	/*
> > +	 * Waiting for the datapath to complete before resetting queues.
> > +	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> > +	 */
> > +	rte_delay_ms(1);
> > +
> > +	/* Vring reset for each Tx queue and Rx queue. */
> > +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > +		rxvq = dev->data->rx_queues[i];
> > +		virtqueue_rxvq_reset_packed(rxvq->vq);
> > +		virtio_dev_rx_queue_setup_finish(dev, i);
> > +	}
> > +
> > +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > +		txvq = dev->data->tx_queues[i];
> > +		virtqueue_txvq_reset_packed(txvq->vq);
> > +	}
> > +
> > +	hw->started = 1;
> > +	rte_spinlock_unlock(&hw->state_lock);
> > +}
> > +
> > +
> >  static int
> >  virtio_user_server_reconnect(struct virtio_user_dev *dev)
> >  {
> >  	int ret;
> >  	int connectfd;
> >  	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> > +	struct virtio_hw *hw = eth_dev->data->dev_private;
> >
> >  	connectfd = accept(dev->listenfd, NULL, NULL);
> >  	if (connectfd < 0)
> > @@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
> >
> >  	dev->features &= dev->device_features;
> >
> > +	/* For packed ring, resetting queues is required in reconnection. */
> > +	if (vtpci_packed_queue(hw))
> > +		virtio_user_reset_queues_packed(eth_dev);
> 
> I think we should log an error here to state that some packets are likely to be
> dropped.
> 
> Other than that, I don't think we have any other choice than doing this right
> now, so I'll ack your patch once the commit message is fixed and an error
> message is printed on vring reset.
> 

A notice message will be added in the new version. It will remind users that packets on the fly will be dropped when the packed ring reconnects.
Thank you for your comments.

Regards,
Xuan

> Thanks,
> Maxime


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server mode
  2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
  2020-01-14 15:04     ` Maxime Coquelin
@ 2020-01-15  6:13     ` Xuan Ding
  2020-01-15 10:47       ` Maxime Coquelin
  2020-01-15 11:16       ` Maxime Coquelin
  1 sibling, 2 replies; 14+ messages in thread
From: Xuan Ding @ 2020-01-15  6:13 UTC (permalink / raw)
  To: maxime.coquelin, tiwei.bie, zhihong.wang, yong.liu, xiaolong.ye
  Cc: dev, Xuan Ding, stable

This patch fixes the situation where data path does not work properly when
vhost reconnects to virtio in server mode with packed ring.

Currently, virtio and vhost share memory of vring. For split ring, vhost
can read the status of descriptors directly from the available ring and
the used ring during reconnection. Therefore, the data path can continue.

But for packed ring, when reconnecting to virtio, vhost cannot get the
status of descriptors via the descriptor ring. By resetting Tx
and Rx queues, the data path can restart from the beginning.

Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
Cc: stable@dpdk.org

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---

v5:
* Fixed two spelling mistakes in the commit log.
* Added notice message when resetting vring.

v4:
* Moved change log below '---' marker.

v3:
* Removed an extra asterisk from a comment.
* Renamed device reset function and moved it to virtio_user_ethdev.c.

v2:
* Renamed queue reset functions and moved them to virtqueue.c.
---
 drivers/net/virtio/virtio_ethdev.c      |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
 drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h          |  4 ++
 4 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 044eb10a7..f9d0ea70d 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 			goto err_vtpci_init;
 	}
 
+	rte_spinlock_init(&hw->state_lock);
+
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	rte_spinlock_init(&hw->state_lock);
-
 	hw->use_simple_rx = 1;
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 3fc172573..9c9d3407f 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -25,12 +25,48 @@
 #define virtio_user_get_dev(hw) \
 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
 
+static void
+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq;
+	struct virtnet_tx *txvq;
+	uint16_t i;
+
+	/* Add lock to avoid queue contention. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Waiting for the datapath to complete before resetting queues.
+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
+	 */
+	rte_delay_ms(1);
+
+	/* Vring reset for each Tx queue and Rx queue. */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxvq = dev->data->rx_queues[i];
+		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtio_dev_rx_queue_setup_finish(dev, i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txvq = dev->data->tx_queues[i];
+		virtqueue_txvq_reset_packed(txvq->vq);
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+
 static int
 virtio_user_server_reconnect(struct virtio_user_dev *dev)
 {
 	int ret;
 	int connectfd;
 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+	struct virtio_hw *hw = eth_dev->data->dev_private;
 
 	connectfd = accept(dev->listenfd, NULL, NULL);
 	if (connectfd < 0)
@@ -51,6 +87,13 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
 
 	dev->features &= dev->device_features;
 
+	/* For packed ring, resetting queues is required in reconnection. */
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+				" when packed ring reconnecting.");
+		virtio_user_reset_queues_packed(eth_dev);
+	}
+
 	ret = virtio_user_start_device(dev);
 	if (ret < 0)
 		return -1;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587..0b4e3bf3e 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+int
+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_rx *rxvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+
+	rxvq = &vq->rxq;
+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
+
+int
+virtqueue_txvq_reset_packed(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	struct vq_desc_extra *dxp;
+	struct virtnet_tx *txvq;
+	uint16_t desc_idx;
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	vq->vq_packed.used_wrap_counter = 1;
+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+	vq->vq_packed.event_flags_shadow = 0;
+
+	txvq = &vq->txq;
+	memset(txvq->mz->addr, 0, txvq->mz->len);
+	memset(txvq->virtio_net_hdr_mz->addr, 0,
+		txvq->virtio_net_hdr_mz->len);
+
+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
+		dxp = &vq->vq_descx[desc_idx];
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
+
+	vring_desc_init_packed(vq, size);
+
+	return 0;
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8d7f197b1..58ad7309a 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.17.1
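
The commit message above describes the asymmetry between the two ring
layouts; the sketch below illustrates it in C. This is illustrative code
loosely modeled on the virtio 1.1 ring layouts, not code from the patch,
and the struct and field names are simplified stand-ins.

#include <stdbool.h>
#include <stdint.h>

/*
 * Split ring: both progress indices live in shared memory, so a
 * backend that reconnects can re-read them and resume the datapath
 * exactly where it left off.
 */
struct split_ring_state {
	uint16_t avail_idx;	/* written by the driver, in shared memory */
	uint16_t used_idx;	/* written by the device, in shared memory */
};

/*
 * Packed ring: a descriptor counts as available (or used) when its
 * AVAIL/USED flag bits match the corresponding wrap counter, and each
 * wrap counter is private, per-side state. The device's copy is lost
 * across a vhost reconnection, so the shared descriptor flags alone
 * are ambiguous -- hence the full Tx/Rx queue reset in this patch.
 */
struct packed_ring_state {
	bool avail_wrap_counter;	/* private to the driver */
	bool used_wrap_counter;		/* private to the device; lost on reconnect */
};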


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server mode
  2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
@ 2020-01-15 10:47       ` Maxime Coquelin
  2020-01-15 11:16       ` Maxime Coquelin
  1 sibling, 0 replies; 14+ messages in thread
From: Maxime Coquelin @ 2020-01-15 10:47 UTC (permalink / raw)
  To: Xuan Ding, tiwei.bie, zhihong.wang, yong.liu, xiaolong.ye; +Cc: dev, stable



On 1/15/20 7:13 AM, Xuan Ding wrote:
> This patch fixes the situation where data path does not work properly when
> vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share memory of vring. For split ring, vhost
> can read the status of descriptors directly from the available ring and
> the used ring during reconnection. Therefore, the data path can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot get the
> status of descriptors via the descriptor ring. By resetting Tx
> and Rx queues, the data path can restart from the beginning.
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> 
> v5:
> * Fixed two spelling mistakes in the commit log.
> * Added notice message when resetting vring.
> 
> v4:
> * Moved change log below '---' marker.
> 
> v3:
> * Removed an extra asterisk from a comment.
> * Renamed device reset function and moved it to virtio_user_ethdev.c.
> 
> v2:
> * Renamed queue reset functions and moved them to virtqueue.c.
> ---
>  drivers/net/virtio/virtio_ethdev.c      |  4 +-
>  drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
>  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
>  drivers/net/virtio/virtqueue.h          |  4 ++
>  4 files changed, 120 insertions(+), 2 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server mode
  2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
  2020-01-15 10:47       ` Maxime Coquelin
@ 2020-01-15 11:16       ` Maxime Coquelin
  2020-01-15 15:40         ` Ferruh Yigit
  1 sibling, 1 reply; 14+ messages in thread
From: Maxime Coquelin @ 2020-01-15 11:16 UTC (permalink / raw)
  To: Xuan Ding, tiwei.bie, zhihong.wang, yong.liu, xiaolong.ye; +Cc: dev, stable



On 1/15/20 7:13 AM, Xuan Ding wrote:
> This patch fixes the situation where data path does not work properly when
> vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share memory of vring. For split ring, vhost
> can read the status of descriptors directly from the available ring and
> the used ring during reconnection. Therefore, the data path can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot get the
> status of descriptors via the descriptor ring. By resetting Tx
> and Rx queues, the data path can restart from the beginning.
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
> 
> v5:
> * Fixed two spelling mistakes in the commit log.
> * Added notice message when resetting vring.
> 
> v4:
> * Moved change log below '---' marker.
> 
> v3:
> * Removed an extra asterisk from a comment.
> * Renamed device reset function and moved it to virtio_user_ethdev.c.
> 
> v2:
> * Renamed queue reset functions and moved them to virtqueue.c.
> ---
>  drivers/net/virtio/virtio_ethdev.c      |  4 +-
>  drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
>  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
>  drivers/net/virtio/virtqueue.h          |  4 ++
>  4 files changed, 120 insertions(+), 2 deletions(-)

Applied to dpdk-next-virtio/master

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server mode
  2020-01-15 11:16       ` Maxime Coquelin
@ 2020-01-15 15:40         ` Ferruh Yigit
  2020-01-16  7:13           ` Ding, Xuan
  0 siblings, 1 reply; 14+ messages in thread
From: Ferruh Yigit @ 2020-01-15 15:40 UTC (permalink / raw)
  To: Maxime Coquelin, Xuan Ding, tiwei.bie, zhihong.wang, yong.liu,
	xiaolong.ye
  Cc: dev, stable

On 1/15/2020 11:16 AM, Maxime Coquelin wrote:
> 
> 
> On 1/15/20 7:13 AM, Xuan Ding wrote:
>> This patch fixes the situation where data path does not work properly when
>> vhost reconnects to virtio in server mode with packed ring.
>>
>> Currently, virtio and vhost share memory of vring. For split ring, vhost
>> can read the status of descriptors directly from the available ring and
>> the used ring during reconnection. Therefore, the data path can continue.
>>
>> But for packed ring, when reconnecting to virtio, vhost cannot get the
>> status of descriptors via the descriptor ring. By resetting Tx
>> and Rx queues, the data path can restart from the beginning.
>>
>> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
>> ---
>>
>> v5:
>> * Fixed two spelling mistakes in the commit log.
>> * Added notice message when resetting vring.
>>
>> v4:
>> * Moved change log below '---' marker.
>>
>> v3:
>> * Removed an extra asterisk from a comment.
>> * Renamed device reset function and moved it to virtio_user_ethdev.c.
>>
>> v2:
>> * Renamed queue reset functions and moved them to virtqueue.c.
>> ---
>>  drivers/net/virtio/virtio_ethdev.c      |  4 +-
>>  drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
>>  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
>>  drivers/net/virtio/virtqueue.h          |  4 ++
>>  4 files changed, 120 insertions(+), 2 deletions(-)
> 
> Applied to dpdk-next-virtio/master
> 

This was causing a build error [1] on cross compilation [2]. I am fixing it
while merging [3]; please double check in next-net.

[1]
.../drivers/net/virtio/virtio_user_ethdev.c:44:2: error: implicit declaration of
function ‘rte_delay_ms’; did you mean ‘rte_realloc’?
[-Werror=implicit-function-declaration]

  rte_delay_ms(1);


[2]
CROSS=/opt/aarch64/bin/aarch64-buildroot-linux-gnu- make -j64


[3]
diff --git a/drivers/net/virtio/virtio_user_ethdev.c
b/drivers/net/virtio/virtio_user_ethdev.c
index 9c9d3407f..f3b35d1bd 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -13,6 +13,7 @@
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>

 #include "virtio_ethdev.h"
 #include "virtio_logs.h"

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server mode
  2020-01-15 15:40         ` Ferruh Yigit
@ 2020-01-16  7:13           ` Ding, Xuan
  0 siblings, 0 replies; 14+ messages in thread
From: Ding, Xuan @ 2020-01-16  7:13 UTC (permalink / raw)
  To: Yigit, Ferruh, Maxime Coquelin, Bie, Tiwei, Wang, Zhihong, Liu,
	Yong, Ye, Xiaolong
  Cc: dev, stable

Hi Ferruh,

> -----Original Message-----
> From: Yigit, Ferruh <ferruh.yigit@intel.com>
> Sent: Wednesday, January 15, 2020 11:41 PM
> To: Maxime Coquelin <maxime.coquelin@redhat.com>; Ding, Xuan
> <xuan.ding@intel.com>; Bie, Tiwei <tiwei.bie@intel.com>; Wang, Zhihong
> <zhihong.wang@intel.com>; Liu, Yong <yong.liu@intel.com>; Ye, Xiaolong
> <xiaolong.ye@intel.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: Re: [dpdk-stable] [PATCH v5] net/virtio-user: fix packed ring server
> mode
> 
> On 1/15/2020 11:16 AM, Maxime Coquelin wrote:
> >
> >
> > On 1/15/20 7:13 AM, Xuan Ding wrote:
> >> This patch fixes the situation where data path does not work properly
> >> when vhost reconnects to virtio in server mode with packed ring.
> >>
> >> Currently, virtio and vhost share memory of vring. For split ring,
> >> vhost can read the status of descriptors directly from the available
> >> ring and the used ring during reconnection. Therefore, the data path can
> >> continue.
> >>
> >> But for packed ring, when reconnecting to virtio, vhost cannot get
> >> the status of descriptors via the descriptor ring. By resetting Tx
> >> and Rx queues, the data path can restart from the beginning.
> >>
> >> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> >> Cc: stable@dpdk.org
> >>
> >> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> >> ---
> >>
> >> v5:
> >> * Fixed two spelling mistakes in the commit log.
> >> * Added notice message when resetting vring.
> >>
> >> v4:
> >> * Moved change log below '---' marker.
> >>
> >> v3:
> >> * Removed an extra asterisk from a comment.
> >> * Renamed device reset function and moved it to virtio_user_ethdev.c.
> >>
> >> v2:
> >> * Renamed queue reset functions and moved them to virtqueue.c.
> >> ---
> >>  drivers/net/virtio/virtio_ethdev.c      |  4 +-
> >>  drivers/net/virtio/virtio_user_ethdev.c | 43 +++++++++++++++
> >>  drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> >>  drivers/net/virtio/virtqueue.h          |  4 ++
> >>  4 files changed, 120 insertions(+), 2 deletions(-)
> >
> > Applied to dpdk-next-virtio/master
> >
> 
> This was causing a build error [1] on cross compilation [2]. I am fixing it
> while merging [3]; please double check in next-net.
> 
> [1]
> .../drivers/net/virtio/virtio_user_ethdev.c:44:2: error: implicit declaration of
> function ‘rte_delay_ms’; did you mean ‘rte_realloc’?
> [-Werror=implicit-function-declaration]
> 
>   rte_delay_ms(1);
> 
> 
> [2]
> CROSS=/opt/aarch64/bin/aarch64-buildroot-linux-gnu- make -j64
> 
> 
> [3]
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index 9c9d3407f..f3b35d1bd 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -13,6 +13,7 @@
>  #include <rte_ethdev_vdev.h>
>  #include <rte_bus_vdev.h>
>  #include <rte_alarm.h>
> +#include <rte_cycles.h>
> 
>  #include "virtio_ethdev.h"
>  #include "virtio_logs.h"


After checking, the current path through which rte_cycles.h is pulled in on the x86 platform is virtio_user_ethdev.c -> virtqueue.h -> rte_mempool.h -> rte_spinlock.h -> rte_cycles.h. Only the x86 version of rte_spinlock.h includes rte_cycles.h; the header is not included on other platforms, so your fix is right. I also tested the new patch (adding #include <rte_cycles.h>) locally, and it works fine.

Thank you very much.

Regards,
Xuan
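
A minimal sketch of the failure mode, assuming the include chain described
above (it has not been re-verified here): rte_delay_ms() is declared in
rte_cycles.h, which x86 builds happened to reach transitively via
rte_spinlock.h, while aarch64 builds did not. The helper name below is a
hypothetical stand-in for the call site in the patch.

/* Including what is used directly avoids relying on per-arch
 * transitive includes. */
#include <rte_cycles.h>	/* declares rte_delay_ms() */

static void quiesce_datapath(void)
{
	/* Without <rte_cycles.h>, aarch64 fails here with
	 * -Werror=implicit-function-declaration, as in log [1]. */
	rte_delay_ms(1);
}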

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2020-01-16  7:13 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-12-09 16:49 [dpdk-stable] [PATCH v1] net/virtio-user: fix packed ring server mode Xuan Ding
2019-12-09  8:51 ` [dpdk-stable] [dpdk-dev] " Liu, Yong
2019-12-12 11:08   ` Ding, Xuan
2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
2019-12-18  2:25   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
2019-12-18  2:38     ` Ding, Xuan
2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
2020-01-14 15:04     ` Maxime Coquelin
2020-01-15  6:08       ` Ding, Xuan
2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
2020-01-15 10:47       ` Maxime Coquelin
2020-01-15 11:16       ` Maxime Coquelin
2020-01-15 15:40         ` Ferruh Yigit
2020-01-16  7:13           ` Ding, Xuan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).