DPDK patches and discussions
* [PATCH] net/iavf: fix segfaults when calling API after VF reset failed
@ 2022-04-21 17:08 Yiding Zhou
  2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
  2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
  0 siblings, 2 replies; 5+ messages in thread
From: Yiding Zhou @ 2022-04-21 17:08 UTC (permalink / raw)
  To: jingjing.wu, beilei.xing; +Cc: dev, stable, qi.z.zhang

Some pointers are set to NULL when iavf_dev_reset() fails, for
example vf->vf_res, vf->vsi_res and vf->rss_key. APIs that access
these NULL pointers will trigger a segfault.

This patch adds a 'closed' flag to indicate that the VF is closed,
and rejects API calls in this state to avoid a coredump.

Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
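Note (illustration only, not part of the commit message): every affected
dev op and virtchnl helper gets the same guard at its top. A minimal
sketch of that pattern is shown below; "example_op" is a hypothetical
name, while struct iavf_adapter, the new "closed" flag and
IAVF_DEV_PRIVATE_TO_ADAPTER() come from the driver itself.

	static int
	example_op(struct rte_eth_dev *dev)
	{
		struct iavf_adapter *adapter =
			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

		/* After a failed VF reset the port is marked closed and
		 * resources such as vf->vf_res may already be NULL, so
		 * reject the call before touching them.
		 */
		if (adapter->closed)
			return -EIO;

		/* ...normal op body... */
		return 0;
	}
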
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c | 59 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_rxtx.c   | 10 ++++++
 drivers/net/iavf/iavf_vchnl.c  | 17 ++++++++++
 4 files changed, 85 insertions(+), 2 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e61b..b3b582dd21 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -298,6 +298,7 @@ struct iavf_adapter {
 	bool tx_vec_allowed;
 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 	bool stopped;
+	bool closed;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac24a..a3454638be 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -229,9 +229,18 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 };
 
 static int
-iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+iavf_tm_ops_get(struct rte_eth_dev *dev,
 			void *arg)
 {
+	struct iavf_adapter *adapter;
+
+	if (!dev)
+		return -EINVAL;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	if (adapter->closed)
+		return -EIO;
+
 	if (!arg)
 		return -EINVAL;
 
@@ -342,6 +351,9 @@ iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	if (adapter->closed)
+		return -EIO;
+
 	/* flush previous addresses */
 	err = iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 					false);
@@ -613,6 +625,9 @@ iavf_dev_configure(struct rte_eth_dev *dev)
 		dev->data->nb_tx_queues);
 	int ret;
 
+	if (ad->closed)
+		return -EIO;
+
 	ad->rx_bulk_alloc_allowed = true;
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * vector Rx/Tx preconditions, it will be reset.
@@ -932,6 +947,9 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	adapter->stopped = 0;
 
 	vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
@@ -1009,6 +1027,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -1;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) &&
 	    dev->data->dev_conf.intr_conf.rxq != 0)
 		rte_intr_disable(intr_handle);
@@ -1046,6 +1067,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = &adapter->vf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->min_rx_bufsize = IAVF_BUF_SIZE_MIN;
@@ -1286,6 +1310,9 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
 		if (err)
@@ -1362,6 +1389,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
 		return iavf_dev_vlan_offload_set_v2(dev, mask);
 
@@ -1394,6 +1424,9 @@ iavf_dev_rss_reta_update(struct rte_eth_dev *dev,
 	uint16_t i, idx, shift;
 	int ret;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1439,6 +1472,9 @@ iavf_dev_rss_reta_query(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t i, idx, shift;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1492,6 +1528,9 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 
 	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1545,6 +1584,9 @@ iavf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 
+	if (adapter->closed)
+		return -EIO;
+
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
 
@@ -1792,6 +1834,9 @@ iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	uint16_t msix_intr;
 
+	if (adapter->closed)
+		return -EIO;
+
 	msix_intr = rte_intr_vec_list_index_get(pci_dev->intr_handle,
 						       queue_id);
 	if (msix_intr == IAVF_MISC_VEC_ID) {
@@ -2415,9 +2460,15 @@ static int
 iavf_dev_flow_ops_get(struct rte_eth_dev *dev,
 		      const struct rte_flow_ops **ops)
 {
+	struct iavf_adapter *adapter;
+
 	if (!dev)
 		return -EINVAL;
 
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	if (adapter->closed)
+		return -EIO;
+
 	*ops = &iavf_flow_ops;
 	return 0;
 }
@@ -2557,7 +2608,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* Start device watchdog */
 	iavf_dev_watchdog_enable(adapter);
-
+	adapter->closed = false;
 
 	return 0;
 
@@ -2585,7 +2636,11 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	if (adapter->closed)
+		return 0;
+
 	ret = iavf_dev_stop(dev);
+	adapter->closed = true;
 
 	iavf_flow_flush(dev, NULL);
 	iavf_flow_uninit(adapter);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 16e8d021f9..a2dc669bfd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -554,6 +554,9 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (ad->closed)
+		return -EIO;
+
 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -715,6 +718,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (adapter->closed)
+		return -EIO;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
@@ -2739,6 +2745,10 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct iavf_tx_queue *txq = tx_queue;
 	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (adapter->closed)
+		return 0;
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2012..1bd3559ec2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -265,6 +265,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
 
+	if (adapter->closed) {
+		PMD_DRV_LOG(DEBUG, "Port closed");
+		return;
+	}
+
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
 		return;
@@ -777,6 +782,9 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 	if (rx)
@@ -1241,6 +1249,9 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	memset(&q_stats, 0, sizeof(q_stats));
 	q_stats.vsi_id = vf->vsi_res->vsi_id;
 	args.ops = VIRTCHNL_OP_GET_STATS;
@@ -1269,6 +1280,9 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	promisc.flags = 0;
 	promisc.vsi_id = vf->vsi_res->vsi_id;
 
@@ -1312,6 +1326,9 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	struct iavf_cmd_info args;
 	int err;
 
+	if (adapter->closed)
+		return -EIO;
+
 	list = (struct virtchnl_ether_addr_list *)cmd_buffer;
 	list->vsi_id = vf->vsi_res->vsi_id;
 	list->num_elements = 1;
-- 
2.25.1


Thread overview: 5+ messages
2022-04-21 17:08 [PATCH] net/iavf: fix segfaults when calling API after VF reset failed Yiding Zhou
2022-04-27 15:25 ` [PATCH v2] " Yiding Zhou
2022-05-11  8:50   ` Huang, Peng
2022-05-12 10:48 ` [PATCH v3] " Yiding Zhou
2022-05-18  4:10   ` Zhang, Qi Z
