* [dpdk-dev] [PATCH v3] bonding: fix the segfault caused by the race condition between master thread and eal-intr-thread
@ 2017-07-26 10:13 zhangsha.zhang
2017-09-04 11:54 ` Radu Nicolau
0 siblings, 1 reply; 3+ messages in thread
From: zhangsha.zhang @ 2017-07-26 10:13 UTC (permalink / raw)
To: dev, declan.doherty; +Cc: jerry.lilijun, zhoujingbin, caihe, Sha Zhang
From: Sha Zhang <zhangsha.zhang@huawei.com>
Function slave_configure calls bond_ethdev_lsc_event_callback and
slave_eth_dev->dev_ops->link_update to update the slave's link status.
However, there is a small chance that the process crashes if the master
thread, which creates the bonding device, raises the bond's
active_slave_count to non-zero before the corresponding rx_ring or
tx_ring has been created.

This patch moves the calls to bond_ethdev_lsc_event_callback and
slave_eth_dev->dev_ops->link_update into the eal-intr-thread to avoid
the race.

Fixes: 210903803f6e ("net/bonding: fix updating slave link status")
Signed-off-by: Sha Zhang <zhangsha.zhang@huawei.com>
---
drivers/net/bonding/rte_eth_bond_pmd.c | 58 +++++++++++++++++++++++++++++-----
1 file changed, 50 insertions(+), 8 deletions(-)
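
Note: the fix relies on rte_eal_alarm_set() to push the initial slave
link-status update into the EAL interrupt thread. As a minimal sketch of
that pattern only (the callback, the delay and bond_port_id below are
hypothetical, not part of this patch):

  #include <rte_alarm.h>
  #include <rte_log.h>

  /* Hypothetical callback: invoked later from the EAL interrupt thread. */
  static void
  deferred_link_check(void *cb_arg)
  {
          unsigned int *port_id = cb_arg;

          RTE_LOG(INFO, PMD, "deferred link check for port %u\n", *port_id);
          /* If a required lock is busy, re-arm the alarm here instead of
           * blocking the interrupt thread, as the patch below does.
           */
  }

  static unsigned int bond_port_id;

  /* Arrange for deferred_link_check() to run roughly 10 ms from now. */
  static inline int
  schedule_link_check(void)
  {
          return rte_eal_alarm_set(10 * 1000 /* us */, deferred_link_check,
                          &bond_port_id);
  }
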
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 383e27c..bc0ee7f 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -53,6 +53,7 @@
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+#define BOND_LSC_DELAY_TIME_US (10 * 1000)
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
@@ -1800,14 +1801,6 @@ struct bwg_slave {
}
}
- /* If lsc interrupt is set, check initial slave's link status */
- if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
- slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
- bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
- RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
- NULL);
- }
-
return 0;
}
@@ -1878,6 +1871,51 @@ struct bwg_slave {
static void
bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+static void
+bond_ethdev_slave_lsc_delay(void *cb_arg)
+{
+ struct rte_eth_dev *bonded_ethdev, *slave_dev;
+ struct bond_dev_private *internals;
+
+ /* If the bonded device's lock cannot be taken, this callback
+ * reschedules itself below instead of blocking the interrupt thread.
+ */
+ int i = 0;
+
+ if (!cb_arg)
+ return;
+
+ bonded_ethdev = (struct rte_eth_dev *)cb_arg;
+ if (!bonded_ethdev->data->dev_started)
+ return;
+
+ internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+ if (!rte_spinlock_trylock(&internals->lock)) {
+ rte_eal_alarm_set(BOND_LSC_DELAY_TIME_US * 10,
+ bond_ethdev_slave_lsc_delay,
+ (void *)&rte_eth_devices[internals->port_id]);
+ return;
+ }
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_dev = &(rte_eth_devices[internals->slaves[i].port_id]);
+ if (slave_dev->data->dev_conf.intr_conf.lsc != 0) {
+ if (slave_dev->dev_ops &&
+ slave_dev->dev_ops->link_update)
+ slave_dev->dev_ops->link_update(slave_dev, 0);
+ bond_ethdev_lsc_event_callback(
+ internals->slaves[i].port_id,
+ RTE_ETH_EVENT_INTR_LSC,
+ &bonded_ethdev->data->port_id, NULL);
+ }
+ }
+ rte_spinlock_unlock(&internals->lock);
+ RTE_LOG(INFO, EAL,
+ "bond %s(%u): slave num %d, current active slave num %d\n",
+ bonded_ethdev->data->name, bonded_ethdev->data->port_id,
+ internals->slave_count, internals->active_slave_count);
+}
+
static int
bond_ethdev_start(struct rte_eth_dev *eth_dev)
{
@@ -1953,6 +1991,10 @@ struct bwg_slave {
if (internals->slaves[i].link_status_poll_enabled)
internals->link_status_polling_enabled = 1;
}
+
+ rte_eal_alarm_set(BOND_LSC_DELAY_TIME_US, bond_ethdev_slave_lsc_delay,
+ (void *)&rte_eth_devices[internals->port_id]);
+
/* start polling if needed */
if (internals->link_status_polling_enabled) {
rte_eal_alarm_set(
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v3] bonding: fix the segfault caused by the race condition between master thread and eal-intr-thread
2017-07-26 10:13 [dpdk-dev] [PATCH v3] bonding: fix the segfault caused by the race condition between master thread and eal-intr-thread zhangsha.zhang
@ 2017-09-04 11:54 ` Radu Nicolau
2017-11-29 17:45 ` Ferruh Yigit
0 siblings, 1 reply; 3+ messages in thread
From: Radu Nicolau @ 2017-09-04 11:54 UTC (permalink / raw)
To: zhangsha.zhang, dev, declan.doherty; +Cc: jerry.lilijun, zhoujingbin, caihe
Hi,
Wouldn't it be possible to treat the section of code that segfaults as a
critical section, i.e. use lock/unlock instead of triggering alarms?
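
For illustration, a rough sketch of what I mean (assuming slave_configure
has, or can derive, the bonded device's internals pointer; the exact
placement is only a suggestion):

  /* Hypothetical: keep the initial link update in slave_configure but
   * serialize it against the threads that update active_slave_count.
   */
  if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
          rte_spinlock_lock(&internals->lock);
          slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
          bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
                          RTE_ETH_EVENT_INTR_LSC,
                          &bonded_eth_dev->data->port_id, NULL);
          rte_spinlock_unlock(&internals->lock);
  }
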
On 7/26/2017 11:13 AM, zhangsha.zhang@huawei.com wrote:
> From: Sha Zhang <zhangsha.zhang@huawei.com>
>
> Function slave_configure calls bond_ethdev_lsc_event_callback and
> slave_eth_dev->dev_ops->link_update to update the slave's link status.
> However, there is a small chance that the process crashes if the master
> thread, which creates the bonding device, raises the bond's
> active_slave_count to non-zero before the corresponding rx_ring or
> tx_ring has been created.
>
> This patch moves the calls to bond_ethdev_lsc_event_callback and
> slave_eth_dev->dev_ops->link_update into the eal-intr-thread to avoid
> the race.
>
> Fixes: 210903803f6e ("net/bonding: fix updating slave link status")
>
> Signed-off-by: Sha Zhang <zhangsha.zhang@huawei.com>
> ---
> drivers/net/bonding/rte_eth_bond_pmd.c | 58 +++++++++++++++++++++++++++++-----
> 1 file changed, 50 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
> index 383e27c..bc0ee7f 100644
> --- a/drivers/net/bonding/rte_eth_bond_pmd.c
> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c
> @@ -53,6 +53,7 @@
>
> #define REORDER_PERIOD_MS 10
> #define DEFAULT_POLLING_INTERVAL_10_MS (10)
> +#define BOND_LSC_DELAY_TIME_US (10 * 1000)
>
> #define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
>
> @@ -1800,14 +1801,6 @@ struct bwg_slave {
> }
> }
>
> - /* If lsc interrupt is set, check initial slave's link status */
> - if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
> - slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
> - bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
> - RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id,
> - NULL);
> - }
> -
> return 0;
> }
>
> @@ -1878,6 +1871,51 @@ struct bwg_slave {
> static void
> bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev);
>
> +static void
> +bond_ethdev_slave_lsc_delay(void *cb_arg)
> +{
> + struct rte_eth_dev *bonded_ethdev, *slave_dev;
> + struct bond_dev_private *internals;
> +
> + /* If the bonded device's lock cannot be taken, this callback
> + * reschedules itself below instead of blocking the interrupt thread.
> + */
> + int i = 0;
> +
> + if (!cb_arg)
> + return;
> +
> + bonded_ethdev = (struct rte_eth_dev *)cb_arg;
> + if (!bonded_ethdev->data->dev_started)
> + return;
> +
> + internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
> + if (!rte_spinlock_trylock(&internals->lock)) {
> + rte_eal_alarm_set(BOND_LSC_DELAY_TIME_US * 10,
> + bond_ethdev_slave_lsc_delay,
> + (void *)&rte_eth_devices[internals->port_id]);
> + return;
> + }
> +
> + for (i = 0; i < internals->slave_count; i++) {
> + slave_dev = &(rte_eth_devices[internals->slaves[i].port_id]);
> + if (slave_dev->data->dev_conf.intr_conf.lsc != 0) {
> + if (slave_dev->dev_ops &&
> + slave_dev->dev_ops->link_update)
> + slave_dev->dev_ops->link_update(slave_dev, 0);
> + bond_ethdev_lsc_event_callback(
> + internals->slaves[i].port_id,
> + RTE_ETH_EVENT_INTR_LSC,
> + &bonded_ethdev->data->port_id, NULL);
> + }
> + }
> + rte_spinlock_unlock(&internals->lock);
> + RTE_LOG(INFO, EAL,
> + "bond %s(%u): slave num %d, current active slave num %d\n",
> + bonded_ethdev->data->name, bonded_ethdev->data->port_id,
> + internals->slave_count, internals->active_slave_count);
> +}
> +
> static int
> bond_ethdev_start(struct rte_eth_dev *eth_dev)
> {
> @@ -1953,6 +1991,10 @@ struct bwg_slave {
> if (internals->slaves[i].link_status_poll_enabled)
> internals->link_status_polling_enabled = 1;
> }
> +
> + rte_eal_alarm_set(BOND_LSC_DELAY_TIME_US, bond_ethdev_slave_lsc_delay,
> + (void *)&rte_eth_devices[internals->port_id]);
> +
> /* start polling if needed */
> if (internals->link_status_polling_enabled) {
> rte_eal_alarm_set(
* Re: [dpdk-dev] [PATCH v3] bonding: fix the segfault caused by the race condition between master thread and eal-intr-thread
2017-09-04 11:54 ` Radu Nicolau
@ 2017-11-29 17:45 ` Ferruh Yigit
0 siblings, 0 replies; 3+ messages in thread
From: Ferruh Yigit @ 2017-11-29 17:45 UTC (permalink / raw)
To: Radu Nicolau, zhangsha.zhang, dev, declan.doherty
Cc: jerry.lilijun, zhoujingbin, caihe
On 9/4/2017 4:54 AM, Radu Nicolau wrote:
> Hi,
>
> Wouldn't it be possible to treat the section of code that segfaults as a
> critical section, i.e. use lock/unlock instead of triggering alarms?
Hi Sha,
Any update? Is this patch still valid?
Thanks,
ferruh