DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Wang, Yinan" <yinan.wang@intel.com>
To: "Liu, Yong" <yong.liu@intel.com>,
	"maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"Wang, Zhihong" <zhihong.wang@intel.com>,
	"Ye, Xiaolong" <xiaolong.ye@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, "Ding, Xuan" <xuan.ding@intel.com>,
	"Liu, Yong" <yong.liu@intel.com>
Subject: Re: [dpdk-dev] [PATCH] net/virtio: fix crash when device reconnecting
Date: Wed, 15 Apr 2020 01:06:43 +0000	[thread overview]
Message-ID: <f9c322abc2834318924586e27d2a460b@intel.com> (raw)
In-Reply-To: <20200414125555.86601-1-yong.liu@intel.com>

Tested-by: Wang, Yinan <yinan.wang@intel.com>

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Marvin Liu
> Sent: 2020年4月14日 20:56
> To: maxime.coquelin@redhat.com; Wang, Zhihong <zhihong.wang@intel.com>;
> Ye, Xiaolong <xiaolong.ye@intel.com>
> Cc: dev@dpdk.org; Ding, Xuan <xuan.ding@intel.com>; Liu, Yong
> <yong.liu@intel.com>
> Subject: [dpdk-dev] [PATCH] net/virtio: fix crash when device reconnecting
> 
> When doing virtio device initialization, virtqueues will be reset in server mode if
> the ring type is packed. This will cause an issue because the queues have already been
> freed at the beginning of device initialization.
> 
> Fix this issue by splitting the device initialization process from the device reinit
> process. Virtqueues won't be freed or reallocated in the reinit process. Also moved
> virtio device initialization from the configuration stage to the start stage, which
> reduces the number of reinitializations.
> 
> Fixes: 6ebbf4109f35 ("net/virtio-user: fix packed ring server mode")
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 21570e5cf..8c84bfe91 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1670,7 +1670,9 @@ virtio_configure_intr(struct rte_eth_dev *dev)
> 
>  /* reset device and renegotiate features if needed */  static int -
> virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
> +virtio_init_device(struct rte_eth_dev *eth_dev,
> +		   uint64_t req_features,
> +		   bool reinit)
>  {
>  	struct virtio_hw *hw = eth_dev->data->dev_private;
>  	struct virtio_net_config *config;
> @@ -1681,7 +1683,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev,
> uint64_t req_features)
>  	/* Reset the device although not necessary at startup */
>  	vtpci_reset(hw);
> 
> -	if (hw->vqs) {
> +	if (hw->vqs && !reinit) {
>  		virtio_dev_free_mbufs(eth_dev);
>  		virtio_free_queues(hw);
>  	}
> @@ -1794,9 +1796,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev,
> uint64_t req_features)
>  			VLAN_TAG_LEN - hw->vtnet_hdr_size;
>  	}
> 
> -	ret = virtio_alloc_queues(eth_dev);
> -	if (ret < 0)
> -		return ret;
> +	if (!reinit) {
> +		ret = virtio_alloc_queues(eth_dev);
> +		if (ret < 0)
> +			return ret;
> +	}
> 
>  	if (eth_dev->data->dev_conf.intr_conf.rxq) {
>  		if (virtio_configure_intr(eth_dev) < 0) { @@ -1925,7 +1929,8
> @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>  	rte_spinlock_init(&hw->state_lock);
> 
>  	/* reset device and negotiate default features */
> -	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> +	ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES,
> +			false);
>  	if (ret < 0)
>  		goto err_virtio_init;
> 
> @@ -2091,12 +2096,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>  		return -EINVAL;
>  	}
> 
> -	if (dev->data->dev_conf.intr_conf.rxq) {
> -		ret = virtio_init_device(dev, hw->req_guest_features);
> -		if (ret < 0)
> -			return ret;
> -	}
> -
>  	if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
>  		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
> 
> @@ -2120,7 +2119,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> 
>  	/* if request features changed, reinit the device */
>  	if (req_features != hw->req_guest_features) {
> -		ret = virtio_init_device(dev, req_features);
> +		ret = virtio_negotiate_features(hw, req_features);
>  		if (ret < 0)
>  			return ret;
>  	}
> @@ -2235,6 +2234,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
>  	struct virtio_hw *hw = dev->data->dev_private;
>  	int ret;
> 
> +	/* reinit the device */
> +	ret = virtio_init_device(dev, hw->req_guest_features, true);
> +	if (ret < 0)
> +		return ret;
> +
>  	/* Finish the initialization of the queues */
>  	for (i = 0; i < dev->data->nb_rx_queues; i++) {
>  		ret = virtio_dev_rx_queue_setup_finish(dev, i);
> --
> 2.17.1


  reply	other threads:[~2020-04-15  1:06 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-04-14 12:55 Marvin Liu
2020-04-15  1:06 ` Wang, Yinan [this message]
2020-04-15  7:24 ` Ye Xiaolong
2020-04-15  7:30   ` Liu, Yong
2020-04-17 15:17     ` Maxime Coquelin
2020-04-19  1:35       ` Liu, Yong
2020-04-19  3:08         ` Ye Xiaolong
2020-05-06 15:07 ` [dpdk-dev] [PATCH v2] " Marvin Liu
2020-05-07 10:53   ` Maxime Coquelin
2020-05-07 14:18   ` Maxime Coquelin
2020-05-08  1:54     ` Wang, Yinan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=f9c322abc2834318924586e27d2a460b@intel.com \
    --to=yinan.wang@intel.com \
    --cc=dev@dpdk.org \
    --cc=maxime.coquelin@redhat.com \
    --cc=xiaolong.ye@intel.com \
    --cc=xuan.ding@intel.com \
    --cc=yong.liu@intel.com \
    --cc=zhihong.wang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).