From mboxrd@z Thu Jan  1 00:00:00 1970
From: Junjie Chen <junjie.j.chen@intel.com>
To: jianfeng.tan@intel.com, maxime.coquelin@redhat.com, mtetsuyah@gmail.com
Cc: dev@dpdk.org, Junjie Chen <junjie.j.chen@intel.com>
Date: Tue, 10 Apr 2018 10:18:09 -0400
Message-Id: <1523369889-73457-1-git-send-email-junjie.j.chen@intel.com>
X-Mailer: git-send-email 2.0.1
Subject: [dpdk-dev] [PATCH] net/vhost: fix vhost invalid state

dev_start sets *dev_attached* after setting up the queues, which puts the
device into an invalid state since no frontend is attached yet. Also,
destroy_device sets *started* to zero, which keeps *allow_queuing* at zero
until dev_start is called again. We should not infer queue existence from
*dev_attached*, but from the queue pointers or a separate variable.

Fixes: 30a701a53737 ("net/vhost: fix crash when creating vdev dynamically")

Signed-off-by: Junjie Chen <junjie.j.chen@intel.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 64 +++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 26 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 11b6076..6a2ff76 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -118,6 +118,7 @@ struct pmd_internal {
 	char *iface_name;
 	uint16_t max_queues;
 	uint16_t vid;
+	uint16_t queue_ready;
 	rte_atomic32_t started;
 };

@@ -528,10 +529,13 @@ update_queuing_status(struct rte_eth_dev *dev)
 	unsigned int i;
 	int allow_queuing = 1;

-	if (rte_atomic32_read(&internal->dev_attached) == 0)
+	if (!dev->data->rx_queues || !dev->data->tx_queues) {
+		RTE_LOG(ERR, PMD, "RX/TX queues not set up yet\n");
 		return;
+	}

-	if (rte_atomic32_read(&internal->started) == 0)
+	if (rte_atomic32_read(&internal->started) == 0 ||
+	    rte_atomic32_read(&internal->dev_attached) == 0)
 		allow_queuing = 0;

 	/* Wait until rx/tx_pkt_burst stops accessing vhost device */
@@ -576,6 +580,8 @@ queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
 		vq->internal = internal;
 		vq->port = eth_dev->data->port_id;
 	}
+
+	internal->queue_ready = 1;
 }

 static int
@@ -607,13 +613,10 @@ new_device(int vid)
 #endif

 	internal->vid = vid;
-	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues)
 		queue_setup(eth_dev, internal);
-		rte_atomic32_set(&internal->dev_attached, 1);
-	} else {
-		RTE_LOG(INFO, PMD, "RX/TX queues have not setup yet\n");
-		rte_atomic32_set(&internal->dev_attached, 0);
-	}
+	else
+		RTE_LOG(INFO, PMD, "RX/TX queues not set up yet\n");

 	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
 		rte_vhost_enable_guest_notification(vid, i, 0);
@@ -622,6 +625,7 @@

 	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

+	rte_atomic32_set(&internal->dev_attached, 1);
 	update_queuing_status(eth_dev);

 	RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
@@ -657,17 +661,19 @@ destroy_device(int vid)

 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

-	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-		vq = eth_dev->data->rx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
-	}
-	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-		vq = eth_dev->data->tx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
+	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			vq = eth_dev->data->rx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			vq = eth_dev->data->tx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
 	}

 	state = vring_states[eth_dev->data->port_id];
@@ -792,11 +798,14 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct pmd_internal *internal = eth_dev->data->dev_private;

-	if (unlikely(rte_atomic32_read(&internal->dev_attached) == 0)) {
-		queue_setup(eth_dev, internal);
-		rte_atomic32_set(&internal->dev_attached, 1);
+	if (!eth_dev->data->rx_queues || !eth_dev->data->tx_queues) {
+		RTE_LOG(ERR, PMD, "RX/TX queues do not exist yet\n");
+		return -1;
 	}

+	if (!internal->queue_ready)
+		queue_setup(eth_dev, internal);
+
 	rte_atomic32_set(&internal->started, 1);
 	update_queuing_status(eth_dev);

@@ -836,10 +845,13 @@ eth_dev_close(struct rte_eth_dev *dev)
 	pthread_mutex_unlock(&internal_list_lock);
 	rte_free(list);

-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		rte_free(dev->data->rx_queues[i]);
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		rte_free(dev->data->tx_queues[i]);
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			rte_free(dev->data->rx_queues[i]);
+
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			rte_free(dev->data->tx_queues[i]);

 	rte_free(dev->data->mac_addrs);
 	free(internal->dev_name);
-- 
2.0.1
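
For readers following the state logic rather than the diff, below is a
minimal, self-contained sketch of the gating this patch establishes. It is
not DPDK code: the struct, field, and function names are hypothetical, and
C11 atomics stand in for rte_atomic32_t. The point it illustrates is that
queue existence is decided by the queue pointers alone, while *started*
(dev_start/dev_stop) and *dev_attached* (new_device/destroy_device)
independently gate *allow_queuing*.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for struct pmd_internal plus the queue arrays. */
struct sketch_dev {
	void *rx_queues;          /* non-NULL once queues are set up */
	void *tx_queues;
	atomic_int started;       /* set by dev_start, cleared by dev_stop */
	atomic_int dev_attached;  /* set by new_device, cleared by destroy_device */
	atomic_int allow_queuing; /* derived: both flags must be set */
};

static void update_queuing_status(struct sketch_dev *d)
{
	/* Queue existence is judged by the pointers, not by dev_attached. */
	if (!d->rx_queues || !d->tx_queues)
		return;

	int allow = atomic_load(&d->started) && atomic_load(&d->dev_attached);
	atomic_store(&d->allow_queuing, allow);
}

int main(void)
{
	struct sketch_dev d = {0};
	int q;

	d.rx_queues = d.tx_queues = &q;   /* queues set up */

	atomic_store(&d.started, 1);      /* eth_dev_start */
	update_queuing_status(&d);
	printf("dev_start only:  allow_queuing=%d\n",
	       atomic_load(&d.allow_queuing)); /* 0: no frontend attached */

	atomic_store(&d.dev_attached, 1); /* new_device: frontend attaches */
	update_queuing_status(&d);
	printf("frontend attach: allow_queuing=%d\n",
	       atomic_load(&d.allow_queuing)); /* 1: both conditions hold */

	atomic_store(&d.dev_attached, 0); /* destroy_device: started stays 1 */
	update_queuing_status(&d);
	printf("frontend detach: allow_queuing=%d\n",
	       atomic_load(&d.allow_queuing)); /* 0 again, until re-attach */

	return 0;
}

Unlike the pre-patch code, destroy_device here clears only *dev_attached*;
because *started* is left alone, a later new_device re-enables queuing
without requiring another dev_start.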