DPDK patches and discussions
* [PATCH] net/iavf: fix segfaults when calling API after VF reset failed
@ 2022-04-21 17:08 Yiding Zhou
  2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
  2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
  0 siblings, 2 replies; 5+ messages in thread
From: Yiding Zhou @ 2022-04-21 17:08 UTC (permalink / raw)
  To: jingjing.wu, beilei.xing; +Cc: dev, stable, qi.z.zhang

Some pointers are set to NULL when iavf_dev_reset() fails, for example
vf->vf_res, vf->vsi_res, vf->rss_key, etc.
APIs that access these NULL pointers will trigger a segfault.

This patch adds a 'closed' flag to indicate that the VF is closed,
and rejects API calls in this state to avoid a coredump.
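
As extra context for the failure mode above, here is a minimal sketch of how
an application could hit the crash; the port id, the reason the reset fails,
and the exact error code propagated back through the ethdev layer are
illustrative assumptions, not part of this patch:

/* Sketch only: assumes an iavf VF port whose reset has just failed
 * (e.g. the PF reset did not complete), leaving vf->vf_res and
 * related pointers NULL inside the PMD.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
query_after_failed_reset(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	int ret;

	if (rte_eth_dev_reset(port_id) != 0) {
		/* Without this patch, ethdev ops such as dev_info_get still
		 * run and dereference vf->vf_res == NULL, crashing the
		 * process.  With the 'closed' flag the PMD is expected to
		 * reject the call instead of segfaulting.
		 */
		ret = rte_eth_dev_info_get(port_id, &info);
		if (ret != 0)
			printf("port %u: dev_info_get rejected after failed reset (%d)\n",
			       port_id, ret);
	}
}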

Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c | 59 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_rxtx.c   | 10 ++++++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++++++++++
 4 files changed, 85 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..b3b582dd21 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -298,6 +298,7 @@ struct iavf_adapter {
 	bool tx_vec_allowed;
 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 	bool stopped;
+	bool closed;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..a3454638be 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -229,9 +229,18 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
 			void *arg)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (!dev)
+		return -EINVAL;
+
+	if (adapter->closed)
+		return -EIO;
+
 	if (!arg)
 		return -EINVAL;
 
@@ -342,6 +351,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (adapter->closed)
+		return -EIO;
+
 	/* flush previous addresses */
 	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 					false);
@@ -613,6 +625,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 		dev->data->nb_tx_queues);
 	int ret;
 
+	if (ad->closed)
+		return -EIO;
+
 	ad->rx_bulk_alloc_allowed = true;
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * vector Rx/Tx preconditions, it will be reset.
@@ -932,6 +947,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	adapter->stopped = 0;
 
 	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1009,6 +1027,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
 	    dev->data->dev_conf.intr_conf.rxq != 0)
 		rte_intr_disable(intr_handle);
@@ -1046,6 +1067,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = &adapter->vf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1286,6 +1310,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
 		if (err)
@@ -1362,6 +1389,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
 		return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1394,6 +1424,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1439,6 +1472,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t i, idx, shift;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1492,6 +1528,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1545,6 +1584,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1792,6 +1834,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
+	if (adapter->closed)
+		return -EIO;
+
 	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
 						       queue_id);
 	if (msix_intr == IAVF_MISC_VEC_ID) {
@@ -2415,9 +2460,15 @@ static int
 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
 		      const struct rte_flow_ops **ops)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
 	if (!dev)
 		return -EINVAL;
 
+	if (adapter->closed)
+		return -EIO;
+
 	*ops = &iavf_flow_ops;
 	return 0;
 }
@@ -2557,7 +2608,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Start device watchdog */
 	iavf_dev_watchdog_enable(adapter);
-
+	adapter->closed = false;
 
 	return 0;
 
@@ -2585,7 +2636,11 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	if (adapter->closed)
+		return 0;
+
 	ret = iavf_dev_stop(dev);
+	adapter->closed = true;
 
 	iavf_flow_flush(dev, NULL);
 	iavf_flow_uninit(adapter);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a2dc669bfd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -554,6 +554,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (ad->closed)
+		return -EIO;
+
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -715,6 +718,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -EIO;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -2739,6 +2745,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct iavf_tx_queue *txq = tx_queue;
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return 0;
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..1bd3559ec2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
 
+	if (adapter->closed) {
+		PMD_DRV_LOG(DEBUG, "Port closed");
+		return;
+	}
+
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
 		return;
@@ -777,6 +782,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 	if (rx)
@@ -1241,6 +1249,9 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&q_stats, 0, sizeof(q_stats));
 	q_stats.vsi_id = vf->vsi_res->vsi_id;
 	args.ops = VIRTCHNL_OP_GET_STATS;
@@ -1269,6 +1280,9 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	promisc.flags = 0;
 	promisc.vsi_id = vf->vsi_res->vsi_id;
 
@@ -1312,6 +1326,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
 	list->vsi_id = vf->vsi_res->vsi_id;
 	list->num_elements = 1;
-- 
2.25.1



* [PATCH v2] net/iavf: fix segfaults when calling API after VF reset failed
  2022-04-21 17:08 [PATCH] net/iavf: fix segfaults when calling API after VF reset failed Yiding Zhou
@ 2022-04-27 15:25 ` Yiding Zhou
  2022-05-11  8:50   ` Huang, Peng
  2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
  1 sibling, 1 reply; 5+ messages in thread
From: Yiding Zhou @ 2022-04-27 15:25 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing
  Cc: qiming.yang, qi.z.zhang, stable, Yiding Zhou

Some pointers are set to NULL when iavf_dev_reset() fails, for example
vf->vf_res, vf->vsi_res, vf->rss_key, etc.
APIs that access these NULL pointers will trigger a segfault.

This patch adds a 'closed' flag to indicate that the VF is closed,
and rejects API calls in this state to avoid a coredump.

Fixes: e74e1bb6280d ("net/iavf: enable port reset")
Cc: stable@dpdk.org

Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c | 57 +++++++++++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx.c   | 10 ++++++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++++++++++
 4 files changed, 81 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..b3b582dd21 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -298,6 +298,7 @@ struct iavf_adapter {
 	bool tx_vec_allowed;
 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 	bool stopped;
+	bool closed;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..91b6e64840 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -229,9 +229,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
 			void *arg)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return -EIO;
+
 	if (!arg)
 		return -EINVAL;
 
@@ -342,6 +348,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (adapter->closed)
+		return -EIO;
+
 	/* flush previous addresses */
 	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 					false);
@@ -613,6 +622,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 		dev->data->nb_tx_queues);
 	int ret;
 
+	if (ad->closed)
+		return -EIO;
+
 	ad->rx_bulk_alloc_allowed = true;
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * vector Rx/Tx preconditions, it will be reset.
@@ -932,6 +944,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	adapter->stopped = 0;
 
 	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1009,6 +1024,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
 	    dev->data->dev_conf.intr_conf.rxq != 0)
 		rte_intr_disable(intr_handle);
@@ -1046,6 +1064,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = &adapter->vf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1286,6 +1307,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
 		if (err)
@@ -1362,6 +1386,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
 		return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1394,6 +1421,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1439,6 +1469,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t i, idx, shift;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1492,6 +1525,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1545,6 +1581,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1792,6 +1831,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
+	if (adapter->closed)
+		return -EIO;
+
 	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
 						       queue_id);
 	if (msix_intr == IAVF_MISC_VEC_ID) {
@@ -2415,8 +2457,11 @@ static int
 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
 		      const struct rte_flow_ops **ops)
 {
-	if (!dev)
-		return -EINVAL;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return -EIO;
 
 	*ops = &iavf_flow_ops;
 	return 0;
@@ -2557,7 +2602,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Start device watchdog */
 	iavf_dev_watchdog_enable(adapter);
-
+	adapter->closed = false;
 
 	return 0;
 
@@ -2585,7 +2630,11 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	if (adapter->closed)
+		return 0;
+
 	ret = iavf_dev_stop(dev);
+	adapter->closed = true;
 
 	iavf_flow_flush(dev, NULL);
 	iavf_flow_uninit(adapter);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a2dc669bfd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -554,6 +554,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (ad->closed)
+		return -EIO;
+
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -715,6 +718,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -EIO;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -2739,6 +2745,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct iavf_tx_queue *txq = tx_queue;
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return 0;
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..1bd3559ec2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
 
+	if (adapter->closed) {
+		PMD_DRV_LOG(DEBUG, "Port closed");
+		return;
+	}
+
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
 		return;
@@ -777,6 +782,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 	if (rx)
@@ -1241,6 +1249,9 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&q_stats, 0, sizeof(q_stats));
 	q_stats.vsi_id = vf->vsi_res->vsi_id;
 	args.ops = VIRTCHNL_OP_GET_STATS;
@@ -1269,6 +1280,9 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	promisc.flags = 0;
 	promisc.vsi_id = vf->vsi_res->vsi_id;
 
@@ -1312,6 +1326,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
 	list->vsi_id = vf->vsi_res->vsi_id;
 	list->num_elements = 1;
-- 
2.25.1



* RE: [PATCH v2] net/iavf: fix segfaults when calling API after VF reset failed
  2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
@ 2022-05-11  8:50   ` Huang, Peng
  0 siblings, 0 replies; 5+ messages in thread
From: Huang, Peng @ 2022-05-11  8:50 UTC (permalink / raw)
  To: Zhou, YidingX, dev, Wu, Jingjing, Xing, Beilei
  Cc: Yang, Qiming, Zhang, Qi Z, stable, Zhou, YidingX


> -----Original Message-----
> From: Yiding Zhou <yidingx.zhou@intel.com>
> Sent: Wednesday, April 27, 2022 11:25 PM
> To: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; stable@dpdk.org; Zhou, YidingX
> <yidingx.zhou@intel.com>
> Subject: [PATCH v2] net/iavf: fix segfaults when calling API after VF reset failed
> 
> Some pointers are set to NULL when iavf_dev_reset() fails, for example
> vf->vf_res, vf->vsi_res, vf->rss_key, etc.
> APIs that access these NULL pointers will trigger a segfault.
> 
> This patch adds a 'closed' flag to indicate that the VF is closed, and rejects
> API calls in this state to avoid a coredump.
> 
> Fixes: e74e1bb6280d ("net/iavf: enable port reset")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
Tested-by: Peng Huang <peng.huang@intel.com>


* [PATCH v3] net/iavf: fix segfaults when calling API after VF reset failed
  2022-04-21 17:08 [PATCH] net/iavf: fix segfaults when calling API after VF reset failed Yiding Zhou
  2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
@ 2022-05-12 10:48 ` Yiding Zhou
  2022-05-18  4:10   ` Zhang, Qi Z
  1 sibling, 1 reply; 5+ messages in thread
From: Yiding Zhou @ 2022-05-12 10:48 UTC (permalink / raw)
  To: dev, jingjing.wu, beilei.xing; +Cc: qi.z.zhang, stable

Some pointers are set to NULL when iavf_dev_reset() fails, for example
vf->vf_res, vf->vsi_res, vf->rss_key, etc.
APIs that access these NULL pointers will trigger a segfault.

This patch adds a 'closed' flag to indicate that the VF is closed,
and rejects API calls in this state to avoid a coredump.

Fixes: e74e1bb6280d ("net/iavf: enable port reset")
Cc: stable@dpdk.org

Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c | 57 +++++++++++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx.c   | 10 ++++++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++++++++++
 4 files changed, 81 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index dd83567e59..819510649a 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -312,6 +312,7 @@ struct iavf_adapter {
 	bool tx_vec_allowed;
 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 	bool stopped;
+	bool closed;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 	uint64_t phc_time;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 82672841f4..198d8299af 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -234,9 +234,15 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
 			void *arg)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return -EIO;
+
 	if (!arg)
 		return -EINVAL;
 
@@ -347,6 +353,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (adapter->closed)
+		return -EIO;
+
 	/* flush previous addresses */
 	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 					false);
@@ -618,6 +627,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 		dev->data->nb_tx_queues);
 	int ret;
 
+	if (ad->closed)
+		return -EIO;
+
 	ad->rx_bulk_alloc_allowed = true;
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * vector Rx/Tx preconditions, it will be reset.
@@ -950,6 +962,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	adapter->stopped = 0;
 
 	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1046,6 +1061,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
 	    dev->data->dev_conf.intr_conf.rxq != 0)
 		rte_intr_disable(intr_handle);
@@ -1083,6 +1101,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = &adapter->vf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1326,6 +1347,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
 		if (err)
@@ -1402,6 +1426,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
 		return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1434,6 +1461,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1479,6 +1509,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t i, idx, shift;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1532,6 +1565,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1585,6 +1621,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1832,6 +1871,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
+	if (adapter->closed)
+		return -EIO;
+
 	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
 						       queue_id);
 	if (msix_intr == IAVF_MISC_VEC_ID) {
@@ -2489,8 +2531,11 @@ static int
 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
 		      const struct rte_flow_ops **ops)
 {
-	if (!dev)
-		return -EINVAL;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return -EIO;
 
 	*ops = &iavf_flow_ops;
 	return 0;
@@ -2631,7 +2676,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Start device watchdog */
 	iavf_dev_watchdog_enable(adapter);
-
+	adapter->closed = false;
 
 	return 0;
 
@@ -2659,7 +2704,11 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	if (adapter->closed)
+		return 0;
+
 	ret = iavf_dev_stop(dev);
+	adapter->closed = true;
 
 	iavf_flow_flush(dev, NULL);
 	iavf_flow_uninit(adapter);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d3b1a58b27..73e4960257 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -558,6 +558,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (ad->closed)
+		return -EIO;
+
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -719,6 +722,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -EIO;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -2843,6 +2849,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct iavf_tx_queue *txq = tx_queue;
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return 0;
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b654433135..0520b1045f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
 
+	if (adapter->closed) {
+		PMD_DRV_LOG(DEBUG, "Port closed");
+		return;
+	}
+
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
 		return;
@@ -778,6 +783,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 	if (rx)
@@ -1247,6 +1255,9 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&q_stats, 0, sizeof(q_stats));
 	q_stats.vsi_id = vf->vsi_res->vsi_id;
 	args.ops = VIRTCHNL_OP_GET_STATS;
@@ -1275,6 +1286,9 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	promisc.flags = 0;
 	promisc.vsi_id = vf->vsi_res->vsi_id;
 
@@ -1318,6 +1332,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
 	list->vsi_id = vf->vsi_res->vsi_id;
 	list->num_elements = 1;
-- 
2.25.1



* RE: [PATCH v3] net/iavf: fix segfaults when calling API after VF reset failed
  2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
@ 2022-05-18  4:10   ` Zhang, Qi Z
  0 siblings, 0 replies; 5+ messages in thread
From: Zhang, Qi Z @ 2022-05-18  4:10 UTC (permalink / raw)
  To: Zhou, YidingX, dev, Wu, Jingjing, Xing, Beilei; +Cc: stable



> -----Original Message-----
> From: Zhou, YidingX <yidingx.zhou@intel.com>
> Sent: Thursday, May 12, 2022 6:49 PM
> To: dev@dpdk.org; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH v3] net/iavf: fix segfaults when calling API after VF reset failed
> 
> Some pointers are set to NULL when iavf_dev_reset() fails, for example
> vf->vf_res, vf->vsi_res, vf->rss_key, etc.
> APIs that access these NULL pointers will trigger a segfault.
> 
> This patch adds a 'closed' flag to indicate that the VF is closed, and rejects
> API calls in this state to avoid a coredump.
> 
> Fixes: e74e1bb6280d ("net/iavf: enable port reset")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi



end of thread, other threads:[~2022-05-18  4:11 UTC | newest]

Thread overview: 5+ messages
2022-04-21 17:08 [PATCH] net/iavf: fix segfaults when calling API after VF reset failed Yiding Zhou
2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
2022-05-11  8:50   ` Huang, Peng
2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
2022-05-18  4:10   ` Zhang, Qi Z
