From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, stable@dpdk.org, yidingx.zhou@intel.com,
Mingjin Ye <mingjinx.ye@intel.com>,
Ke Zhang <ke1x.zhang@intel.com>, Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH v4] net/ice: fix ice dcf control thread crash
Date: Fri, 17 Mar 2023 05:09:36 +0000 [thread overview]
Message-ID: <20230317050936.5513-1-mingjinx.ye@intel.com> (raw)
In-Reply-To: <20230315082018.4260-1-mingjinx.ye@intel.com>
The control thread accesses the hardware resources after the
resources have been released, which results in a segmentation fault.
The 'ice-reset' threads are detached, so thread resources cannot be
reclaimed by `pthread_join` calls.
This commit synchronizes the number of 'ice-reset' threads by adding two
variables (the 'vsi_update_thread_num' static global and
the 'vsi_thread_lock' static global spinlock). When releasing HW
resources, we clear the event callback function. That makes these threads
exit quickly. After the number of 'ice-reset' threads decreases to 0,
we release the resources.
Fixes: 3b3757bda3c3 ("net/ice: get VF hardware index in DCF")
Fixes: 931ee54072b1 ("net/ice: support QoS bandwidth config after VF reset in DCF")
Fixes: c7e1a1a3bfeb ("net/ice: refactor DCF VLAN handling")
Fixes: 0b02c9519432 ("net/ice: handle PF initialization by DCF")
Fixes: b71573ec2fc2 ("net/ice: retry getting VF VSI map after failure")
Fixes: 7564d5509611 ("net/ice: add DCF hardware initialization")
Cc: stable@dpdk.org
Signed-off-by: Ke Zhang <ke1x.zhang@intel.com>
Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: add pthread_exit() for windows
---
v3: Optimization. It is unsafe for a thread to exit forcibly, which
can cause the spin lock not to be released correctly
---
v4: Safely wait for all event threads to end
---
drivers/net/ice/ice_dcf.c | 18 ++++++++++++++--
drivers/net/ice/ice_dcf.h | 1 +
drivers/net/ice/ice_dcf_parent.c | 37 ++++++++++++++++++++++++++++++++
3 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 1c3d22ae0f..169520f5bb 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -543,6 +543,8 @@ ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
ice_dcf_disable_irq0(hw);
for (;;) {
+ if (hw->vc_event_msg_cb == NULL)
+ break;
if (ice_dcf_get_vf_resource(hw) == 0 &&
ice_dcf_get_vf_vsi_map(hw) >= 0) {
err = 0;
@@ -555,8 +557,10 @@ ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
}
- rte_intr_enable(pci_dev->intr_handle);
- ice_dcf_enable_irq0(hw);
+ if (hw->vc_event_msg_cb != NULL) {
+ rte_intr_enable(pci_dev->intr_handle);
+ ice_dcf_enable_irq0(hw);
+ }
rte_spinlock_unlock(&hw->vc_cmd_send_lock);
@@ -749,6 +753,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+ /* Clear event callbacks, `VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE`
+ * event will be ignored and all running `ice-thread` threads
+ * will exit quickly.
+ */
+ hw->vc_event_msg_cb = NULL;
+
if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
if (hw->tm_conf.committed) {
ice_dcf_clear_bw(hw);
@@ -760,6 +770,10 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
rte_intr_callback_unregister(intr_handle,
ice_dcf_dev_interrupt_handler, hw);
+ /* Wait for all `ice-thread` threads to exit. */
+ while (ice_dcf_event_handle_num() > 0)
+ rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
+
ice_dcf_mode_disable(hw);
iavf_shutdown_adminq(&hw->avf);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 7f42ebabe9..6c636a7497 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -143,6 +143,7 @@ int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
void *buf, uint16_t buf_size);
int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
+int ice_dcf_event_handle_num(void);
int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 01e390ddda..0ff08e179e 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -14,6 +14,9 @@
#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_spinlock_t vsi_thread_lock = RTE_SPINLOCK_INITIALIZER;
+static int vsi_update_thread_num;
+
struct ice_dcf_reset_event_param {
struct ice_dcf_hw *dcf_hw;
@@ -130,6 +133,9 @@ ice_dcf_vsi_update_service_handler(void *param)
rte_spinlock_lock(&vsi_update_lock);
+ if (hw->vc_event_msg_cb == NULL)
+ goto update_end;
+
if (!ice_dcf_handle_vsi_update_event(hw)) {
__atomic_store_n(&parent_adapter->dcf_state_on, true,
__ATOMIC_RELAXED);
@@ -150,10 +156,14 @@ ice_dcf_vsi_update_service_handler(void *param)
if (hw->tm_conf.committed)
ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
+update_end:
rte_spinlock_unlock(&vsi_update_lock);
free(param);
+ rte_spinlock_lock(&vsi_thread_lock);
+ vsi_update_thread_num--;
+ rte_spinlock_unlock(&vsi_thread_lock);
return NULL;
}
@@ -183,6 +193,10 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw, bool vfr, uint16_t vf_id)
PMD_DRV_LOG(ERR, "Failed to start the thread for reset handling");
free(param);
}
+
+ rte_spinlock_lock(&vsi_thread_lock);
+ vsi_update_thread_num++;
+ rte_spinlock_unlock(&vsi_thread_lock);
}
static uint32_t
@@ -262,6 +276,18 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
+ /* If the event handling callback is empty, the event cannot
+ * be handled. Therefore we ignore this event.
+ */
+ if (dcf_hw->vc_event_msg_cb == NULL) {
+ PMD_DRV_LOG(DEBUG,
+ "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event "
+ "received: VF%u with VSI num %u, ignore processing",
+ pf_msg->event_data.vf_vsi_map.vf_id,
+ pf_msg->event_data.vf_vsi_map.vsi_id);
+ break;
+ }
+
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
pf_msg->event_data.vf_vsi_map.vf_id,
pf_msg->event_data.vf_vsi_map.vsi_id);
@@ -505,3 +531,14 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
}
+
+int ice_dcf_event_handle_num(void)
+{
+ int ret;
+
+ rte_spinlock_lock(&vsi_thread_lock);
+ ret = vsi_update_thread_num;
+ rte_spinlock_unlock(&vsi_thread_lock);
+
+ return ret;
+}
--
2.25.1
next prev parent reply other threads:[~2023-03-17 5:14 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-08 8:30 [PATCH] net/ice: fix ice dcf contrl " Ke Zhang
2023-02-09 0:05 ` Stephen Hemminger
2023-02-13 7:03 ` [PATCH v2] " Ke Zhang
2023-02-21 0:29 ` Zhang, Qi Z
2023-02-13 7:14 ` Ke Zhang
2023-02-13 7:16 ` [PATCH v2] net/ice: fix ice dcf control " Ke Zhang
2023-02-14 11:03 ` Thomas Monjalon
2023-02-16 7:53 ` Zhang, Ke1X
2023-02-20 0:30 ` Thomas Monjalon
2023-03-01 1:54 ` Zhang, Ke1X
2023-03-01 14:53 ` Kevin Traynor
2023-03-15 8:20 ` [PATCH v3] " Mingjin Ye
2023-03-15 13:06 ` Zhang, Qi Z
2023-03-17 5:09 ` Mingjin Ye [this message]
2023-03-17 10:15 ` [PATCH v4] " Zhang, Qi Z
2023-03-20 9:40 ` [PATCH v5] " Mingjin Ye
2023-03-20 12:52 ` Zhang, Qi Z
2023-03-21 2:08 ` Ye, MingjinX
2023-03-21 11:55 ` Zhang, Qi Z
2023-03-21 16:24 ` Tyler Retzlaff
2023-03-22 5:56 ` [PATCH v6] " Mingjin Ye
2023-04-03 6:54 ` Zhang, Qi Z
2023-04-11 2:08 ` [PATCH v7] " Mingjin Ye
2023-05-15 6:28 ` Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230317050936.5513-1-mingjinx.ye@intel.com \
--to=mingjinx.ye@intel.com \
--cc=dev@dpdk.org \
--cc=ke1x.zhang@intel.com \
--cc=qi.z.zhang@intel.com \
--cc=qiming.yang@intel.com \
--cc=stable@dpdk.org \
--cc=yidingx.zhou@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).