patches for DPDK stable branches
From: "Liu, Yong" <yong.liu@intel.com>
To: "Ding, Xuan" <xuan.ding@intel.com>,
	"maintainer@dpdk.org" <maintainer@dpdk.org>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
	"maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"Bie, Tiwei" <tiwei.bie@intel.com>,
	"Wang, Zhihong" <zhihong.wang@intel.com>,
	"Ding, Xuan" <xuan.ding@intel.com>,
	"stable@dpdk.org" <stable@dpdk.org>
Subject: Re: [dpdk-stable] [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server mode
Date: Mon, 9 Dec 2019 08:51:57 +0000
Message-ID: <86228AFD5BCD8E4EBFD2B90117B5E81E6341F8D8@SHSMSX103.ccr.corp.intel.com> (raw)
In-Reply-To: <20191209164939.54806-1-xuan.ding@intel.com>



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Xuan Ding
> Sent: Tuesday, December 10, 2019 12:50 AM
> To: maintainer@dpdk.org
> Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Bie, Tiwei
> <tiwei.bie@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Ding, Xuan
> <xuan.ding@intel.com>; stable@dpdk.org
> Subject: [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server
> mode
> 
> This patch fixes the situation where the datapath does not work properly
> when vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share the memory of the vring. For split
> ring, vhost can read the status of the descriptors directly from the
> available ring and the used ring during reconnection, so the datapath
> can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot recover
> the status of the descriptors from the descriptor ring alone. By
> resetting the Tx and Rx queues, the datapath can restart from the
> beginning.
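
To see why the descriptor status is unrecoverable here: in a packed ring,
availability is encoded by the AVAIL/USED flag pair relative to a wrap
counter that lives only in driver state. A minimal sketch of the check,
modeled on the desc_is_avail() helper in the vhost library (the flag
values match the virtio 1.1 spec bit positions, but names and exact
definitions here are illustrative, not the PMD's):

#include <stdbool.h>
#include <stdint.h>

#define VRING_DESC_F_AVAIL (1 << 7)   /* virtio 1.1 packed-ring bit */
#define VRING_DESC_F_USED  (1 << 15)  /* virtio 1.1 packed-ring bit */

/*
 * A descriptor is available only when its AVAIL flag matches the
 * ring's current wrap counter and its USED flag does not. The wrap
 * counter is private driver state, so a backend reconnecting
 * mid-stream cannot reconstruct it from the descriptor ring alone.
 */
static inline bool
desc_is_avail(uint16_t flags, bool wrap_counter)
{
	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}
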
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.ding@intel.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c      | 112 +++++++++++++++++++++++-
>  drivers/net/virtio/virtio_ethdev.h      |   3 +
>  drivers/net/virtio/virtio_user_ethdev.c |   8 ++
>  3 files changed, 121 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 044eb10a7..c0cb0f23c 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq)
>  	virtqueue_disable_intr(vq);
>  }
> 
> +static int
> +virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{

Hi Xuan,
This function is named virtio_user_reset_rx_queues, but it looks like it has no relationship with virtio_user.
Maybe renaming it to virtqueue_reset and moving it to virtqueue.c would be more suitable.
Please also add the suffix _packed, since this function only works for packed ring.
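For example, something like the following prototypes (illustrative only;
the exact signatures are up to you):

int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
int virtqueue_txvq_reset_packed(struct virtqueue *vq);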

Thanks,
Marvin

> +	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
> +	struct virtio_hw *hw = dev->data->dev_private;
> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +	struct virtnet_rx *rxvq;
> +	struct vq_desc_extra *dxp;
> +	unsigned int vq_size;
> +	uint16_t desc_idx, i;
> +
> +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
The virtqueue size has already been saved in vq_nentries in the virtqueue structure. Do we need to fetch it again here?
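If not, a minimal alternative (untested) would be to reuse the stored value:

vq_size = vq->vq_nentries;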

> +	vq->vq_packed.used_wrap_counter = 1;
> +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +	vq->vq_packed.event_flags_shadow = 0;
> +	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> +
> +	rxvq = &vq->rxq;
> +	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> +
> +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +		dxp = &vq->vq_descx[desc_idx];
> +		if (dxp->cookie != NULL) {
> +			rte_pktmbuf_free(dxp->cookie);
> +			dxp->cookie = NULL;
> +		}
> +	}
> +
> +	virtio_init_vring(vq);
> +
> +	for (i = 0; i < hw->max_queue_pairs; i++)
> +		if (rxvq->mpool != NULL)
> +			virtio_dev_rx_queue_setup_finish(dev, i);
> +

Please add braces around the multi-line loop body.
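e.g. (same logic, just braced):

for (i = 0; i < hw->max_queue_pairs; i++) {
	if (rxvq->mpool != NULL)
		virtio_dev_rx_queue_setup_finish(dev, i);
}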

> +	return 0;
> +}
> +
> +static int
> +virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{
> +	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
> +	struct virtio_hw *hw = dev->data->dev_private;
> +	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +	struct virtnet_tx *txvq;
> +	struct vq_desc_extra *dxp;
> +	unsigned int vq_size;
> +	uint16_t desc_idx;
> +
> +	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
> +	vq->vq_packed.used_wrap_counter = 1;
> +	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +	vq->vq_packed.event_flags_shadow = 0;
> +
> +	txvq = &vq->txq;
> +	memset(txvq->mz->addr, 0, txvq->mz->len);
> +	memset(txvq->virtio_net_hdr_mz->addr, 0,
> +		txvq->virtio_net_hdr_mz->len);
> +
> +	for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +		dxp = &vq->vq_descx[desc_idx];
> +		if (dxp->cookie != NULL) {
> +			rte_pktmbuf_free(dxp->cookie);
> +			dxp->cookie = NULL;
> +		}
> +	}
> +
> +	virtio_init_vring(vq);
> +
> +	return 0;
> +}
> +
> +static int
> +virtio_user_reset_queues(struct rte_eth_dev *eth_dev)
> +{
> +	uint16_t i;
> +
> +	/* Vring reset for each Tx queue and Rx queue. */
> +	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> +		virtio_user_reset_rx_queues(eth_dev, i);
> +
> +	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +		virtio_user_reset_tx_queues(eth_dev, i);
> +
> +	return 0;
> +}
> +
>  static int
>  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  {
> @@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  			goto err_vtpci_init;
>  	}
> 
> +	rte_spinlock_init(&hw->state_lock);
> +
>  	/* reset device and negotiate default features */
>  	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
>  	if (ret < 0)
> @@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>  			return -EBUSY;
>  		}
> 
> -	rte_spinlock_init(&hw->state_lock);
> -
>  	hw->use_simple_rx = 1;
> 
>  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> @@ -2421,6 +2509,26 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev
> *dev, int mask)
>  	return 0;
>  }
> 
> +int
> +virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct virtio_hw
> *hw)
> +{
> +	/* Add lock to avoid queue contention. */
> +	rte_spinlock_lock(&hw->state_lock);
> +	hw->started = 0;
> +	/*
> +	 * Wait for the datapath to complete before resetting queues.
> +	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
> +	 */
> +	rte_delay_ms(1);
> +
> +	virtio_user_reset_queues(eth_dev);
> +
> +	hw->started = 1;
> +	rte_spinlock_unlock(&hw->state_lock);
> +
> +	return 0;
> +}
> +
>  static int
>  virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> *dev_info)
>  {
> diff --git a/drivers/net/virtio/virtio_ethdev.h
> b/drivers/net/virtio/virtio_ethdev.h
> index a10111758..72e9e3ff8 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -49,6 +49,9 @@
> 
>  extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
> 
> +int virtio_user_reset_device(struct rte_eth_dev *eth_dev,
> +		struct virtio_hw *hw);
> +
>  /*
>   * CQ function prototype
>   */
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index 3fc172573..49068a578 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -31,6 +31,7 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
>  	int ret;
>  	int connectfd;
>  	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> +	struct virtio_hw *hw = eth_dev->data->dev_private;
> 
>  	connectfd = accept(dev->listenfd, NULL, NULL);
>  	if (connectfd < 0)
> @@ -51,6 +52,13 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
> 
>  	dev->features &= dev->device_features;
> 
> +	/*
> +	 * For packed ring, resetting the queues is
> +	 * required during reconnection.
> +	 */
> +	if (vtpci_packed_queue(hw))
> +		virtio_user_reset_device(eth_dev, hw);
> +
>  	ret = virtio_user_start_device(dev);
>  	if (ret < 0)
>  		return -1;
> --
> 2.17.1


Thread overview: 14+ messages
2019-12-09 16:49 [dpdk-stable] " Xuan Ding
2019-12-09  8:51 ` Liu, Yong [this message]
2019-12-12 11:08   ` [dpdk-stable] [dpdk-dev] " Ding, Xuan
2019-12-18  2:24 ` [dpdk-stable] [PATCH v3] " Xuan Ding
2019-12-18  2:25   ` [dpdk-stable] [dpdk-dev] " Ye Xiaolong
2019-12-18  2:38     ` Ding, Xuan
2019-12-23  7:25   ` [dpdk-stable] [PATCH v4] " Xuan Ding
2020-01-14 15:04     ` Maxime Coquelin
2020-01-15  6:08       ` Ding, Xuan
2020-01-15  6:13     ` [dpdk-stable] [PATCH v5] " Xuan Ding
2020-01-15 10:47       ` Maxime Coquelin
2020-01-15 11:16       ` Maxime Coquelin
2020-01-15 15:40         ` Ferruh Yigit
2020-01-16  7:13           ` Ding, Xuan
